author     dotnet-bot <dotnet-bot@microsoft.com>   2015-01-30 14:14:42 -0800
committer  dotnet-bot <dotnet-bot@microsoft.com>   2015-01-30 14:14:42 -0800
commit     ef1e2ab328087c61a6878c1e84f4fc5d710aebce (patch)
tree       dee1bbb89e9d722e16b0d1485e3cdd1b6c8e2cfa /src/debug/ee
Initial commit to populate CoreCLR repo
[tfs-changeset: 1407945]
Diffstat (limited to 'src/debug/ee')
-rw-r--r--  src/debug/ee/.gitmirror | 1
-rw-r--r--  src/debug/ee/CMakeLists.txt | 57
-rw-r--r--  src/debug/ee/DIRS.proj | 20
-rw-r--r--  src/debug/ee/DebuggerEE.vcproj | 107
-rw-r--r--  src/debug/ee/EE.props | 63
-rw-r--r--  src/debug/ee/amd64/.gitmirror | 1
-rw-r--r--  src/debug/ee/amd64/amd64walker.cpp | 1182
-rw-r--r--  src/debug/ee/amd64/dbghelpers.S | 148
-rw-r--r--  src/debug/ee/amd64/dbghelpers.asm | 153
-rw-r--r--  src/debug/ee/amd64/debuggerregdisplayhelper.cpp | 42
-rw-r--r--  src/debug/ee/amd64/primitives.cpp | 14
-rw-r--r--  src/debug/ee/arm/.gitmirror | 1
-rw-r--r--  src/debug/ee/arm/armwalker.cpp | 408
-rw-r--r--  src/debug/ee/arm/dbghelpers.asm | 91
-rw-r--r--  src/debug/ee/arm/primitives.cpp | 38
-rw-r--r--  src/debug/ee/arm64/.gitmirror | 1
-rw-r--r--  src/debug/ee/arm64/arm64walker.cpp | 23
-rw-r--r--  src/debug/ee/arm64/primitives.cpp | 16
-rw-r--r--  src/debug/ee/canary.cpp | 325
-rw-r--r--  src/debug/ee/canary.h | 81
-rw-r--r--  src/debug/ee/controller.cpp | 8819
-rw-r--r--  src/debug/ee/controller.h | 1966
-rw-r--r--  src/debug/ee/controller.inl | 57
-rw-r--r--  src/debug/ee/dac/.gitmirror | 1
-rw-r--r--  src/debug/ee/dac/CMakeLists.txt | 4
-rw-r--r--  src/debug/ee/dac/dirs.proj | 19
-rw-r--r--  src/debug/ee/datatest.h | 59
-rw-r--r--  src/debug/ee/dbgtransportproxy.cpp | 122
-rw-r--r--  src/debug/ee/dbgtransportproxy.h | 51
-rw-r--r--  src/debug/ee/ddunpack.cpp | 4578
-rw-r--r--  src/debug/ee/ddunpack.h | 498
-rw-r--r--  src/debug/ee/debugger.cpp | 17000
-rw-r--r--  src/debug/ee/debugger.h | 3833
-rw-r--r--  src/debug/ee/debugger.inl | 304
-rw-r--r--  src/debug/ee/debuggermodule.cpp | 445
-rw-r--r--  src/debug/ee/frameinfo.cpp | 2216
-rw-r--r--  src/debug/ee/frameinfo.h | 210
-rw-r--r--  src/debug/ee/funceval.cpp | 3990
-rw-r--r--  src/debug/ee/functioninfo.cpp | 2474
-rw-r--r--  src/debug/ee/i386/.gitmirror | 1
-rw-r--r--  src/debug/ee/i386/dbghelpers.asm | 101
-rw-r--r--  src/debug/ee/i386/debuggerregdisplayhelper.cpp | 19
-rw-r--r--  src/debug/ee/i386/primitives.cpp | 12
-rw-r--r--  src/debug/ee/i386/x86walker.cpp | 501
-rw-r--r--  src/debug/ee/inprocdac.cpp | 432
-rw-r--r--  src/debug/ee/inprocdac.h | 157
-rw-r--r--  src/debug/ee/rcthread.cpp | 2209
-rw-r--r--  src/debug/ee/shared.cpp | 16
-rw-r--r--  src/debug/ee/stdafx.cpp | 13
-rw-r--r--  src/debug/ee/stdafx.h | 40
-rw-r--r--  src/debug/ee/walker.h | 237
-rw-r--r--  src/debug/ee/wks/.gitmirror | 1
-rw-r--r--  src/debug/ee/wks/CMakeLists.txt | 29
-rw-r--r--  src/debug/ee/wks/wks.nativeproj | 41
54 files changed, 53227 insertions(+), 0 deletions(-)
diff --git a/src/debug/ee/.gitmirror b/src/debug/ee/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/debug/ee/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror.
\ No newline at end of file
diff --git a/src/debug/ee/CMakeLists.txt b/src/debug/ee/CMakeLists.txt
new file mode 100644
index 0000000000..fff144a5b1
--- /dev/null
+++ b/src/debug/ee/CMakeLists.txt
@@ -0,0 +1,57 @@
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+add_definitions(-DFEATURE_NO_HOST)
+add_definitions(-D_TARGET_AMD64_=1)
+add_definitions(-DDBG_TARGET_64BIT=1)
+add_definitions(-DDBG_TARGET_AMD64=1)
+add_definitions(-DDBG_TARGET_WIN64=1)
+
+include_directories(BEFORE ${VM_DIR})
+include_directories(BEFORE ${VM_DIR}/${ARCH_SOURCES_DIR})
+include_directories(BEFORE ${CMAKE_CURRENT_SOURCE_DIR})
+
+set(CORDBEE_SOURCES_DAC_AND_WKS
+ controller.cpp
+ debugger.cpp
+ debuggermodule.cpp
+ functioninfo.cpp
+)
+
+set(CORDBEE_SOURCES_WKS
+ ${CORDBEE_SOURCES_DAC_AND_WKS}
+ funceval.cpp
+ rcthread.cpp
+ canary.cpp
+ shared.cpp
+ frameinfo.cpp
+ ${ARCH_SOURCES_DIR}/primitives.cpp
+ ${ARCH_SOURCES_DIR}/debuggerregdisplayhelper.cpp
+)
+
+set(CORDBEE_SOURCES_DAC
+ ${CORDBEE_SOURCES_DAC_AND_WKS}
+)
+
+if(WIN32)
+ set(CORDBEE_SOURCES_WKS
+ ${CORDBEE_SOURCES_WKS}
+ # The following files need to be ported to Linux
+ inprocdac.cpp
+ dbgtransportproxy.cpp
+ ddunpack.cpp
+ )
+endif(WIN32)
+
+if (IS_64BIT_BUILD EQUAL 1)
+ set(CORDBEE_SOURCES_WKS ${CORDBEE_SOURCES_WKS} amd64/amd64walker.cpp)
+else (IS_64BIT_BUILD EQUAL 1)
+ set(CORDBEE_SOURCES_WKS ${CORDBEE_SOURCES_WKS} i386/x86walker.cpp)
+endif (IS_64BIT_BUILD EQUAL 1)
+
+convert_to_absolute_path(CORDBEE_SOURCES_DAC ${CORDBEE_SOURCES_DAC})
+convert_to_absolute_path(CORDBEE_SOURCES_WKS ${CORDBEE_SOURCES_WKS})
+
+set(CORDBEE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+
+add_subdirectory(dac)
+add_subdirectory(wks)
\ No newline at end of file
diff --git a/src/debug/ee/DIRS.proj b/src/debug/ee/DIRS.proj
new file mode 100644
index 0000000000..63dd0c8afb
--- /dev/null
+++ b/src/debug/ee/DIRS.proj
@@ -0,0 +1,20 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <!--Import the settings-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.props" />
+
+ <!--The following projects will build during PHASE 1-->
+ <PropertyGroup>
+ <BuildInPhase1>true</BuildInPhase1>
+ <BuildInPhaseDefault>false</BuildInPhaseDefault>
+ <BuildCoreBinaries>true</BuildCoreBinaries>
+ <BuildSysBinaries>true</BuildSysBinaries>
+ </PropertyGroup>
+
+ <ItemGroup Condition="'$(BuildExePhase)' == '1'">
+ <ProjectFile Include="wks\wks.nativeproj" />
+ <ProjectFile Include="dac\dirs.proj" />
+ </ItemGroup>
+
+ <!--Import the targets-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\tools\Microsoft.DevDiv.Traversal.targets" />
+</Project>
diff --git a/src/debug/ee/DebuggerEE.vcproj b/src/debug/ee/DebuggerEE.vcproj
new file mode 100644
index 0000000000..6df51a0fd6
--- /dev/null
+++ b/src/debug/ee/DebuggerEE.vcproj
@@ -0,0 +1,107 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioProject
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="DebuggerEE"
+ ProjectGUID="{31EEC9FD-A233-4B36-8762-2D30A030C319}"
+ Keyword="MakeFileProj">
+ <Platforms>
+ <Platform
+ Name="Win32"/>
+ </Platforms>
+ <Configurations>
+ <Configuration
+ Name="Debug|Win32"
+ OutputDirectory="Debug"
+ IntermediateDirectory="Debug"
+ ConfigurationType="0">
+ <Tool
+ Name="VCNMakeTool"
+ Output="DebuggerEE.exe"/>
+ </Configuration>
+ <Configuration
+ Name="Release|Win32"
+ OutputDirectory="Release"
+ IntermediateDirectory="Release"
+ ConfigurationType="0">
+ <Tool
+ Name="VCNMakeTool"
+ Output="DebuggerEE.exe"/>
+ </Configuration>
+ </Configurations>
+ <References>
+ </References>
+ <Files>
+ <Filter
+ Name="Source Files"
+ Filter="cpp;c;cxx;def;odl;idl;hpj;bat;asm">
+ <File
+ RelativePath="controller.cpp">
+ </File>
+ <File
+ RelativePath="debugger.cpp">
+ </File>
+ <File
+ RelativePath="frameinfo.cpp">
+ </File>
+ <File
+ RelativePath="ilwalker.cpp">
+ </File>
+ <File
+ RelativePath="lscommon.cpp">
+ </File>
+ <File
+ RelativePath="lsdivalue.cpp">
+ </File>
+ <File
+ RelativePath="lshash.cpp">
+ </File>
+ <File
+ RelativePath="lsmodule.cpp">
+ </File>
+ <File
+ RelativePath="lsprocess.cpp">
+ </File>
+ <File
+ RelativePath="lsthread.cpp">
+ </File>
+ <File
+ RelativePath="rcthread.cpp">
+ </File>
+ <File
+ RelativePath="stdafx.cpp">
+ </File>
+ <File
+ RelativePath="thread.cpp">
+ </File>
+ <File
+ RelativePath="i386\x86walker.cpp">
+ </File>
+ </Filter>
+ <Filter
+ Name="Header Files"
+ Filter="h;hpp;hxx;hm;inl;inc">
+ <File
+ RelativePath="controller.h">
+ </File>
+ <File
+ RelativePath="debugger.h">
+ </File>
+ <File
+ RelativePath="frameinfo.h">
+ </File>
+ <File
+ RelativePath="stdafx.h">
+ </File>
+ <File
+ RelativePath="walker.h">
+ </File>
+ </Filter>
+ <Filter
+ Name="Resource Files"
+ Filter="rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe">
+ </Filter>
+ </Files>
+ <Globals>
+ </Globals>
+</VisualStudioProject>
diff --git a/src/debug/ee/EE.props b/src/debug/ee/EE.props
new file mode 100644
index 0000000000..409f5048a5
--- /dev/null
+++ b/src/debug/ee/EE.props
@@ -0,0 +1,63 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <!--*****************************************************-->
+ <!--This MSBuild project file was automatically generated-->
+ <!--from the original SOURCES/DIRS file by the KBC tool.-->
+ <!--*****************************************************-->
+ <!--Import the settings-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.props" />
+ <Import Project="$(Clrbase)\src\Debug\SetDebugTargetLocal.props" />
+ <!--Leaf project Properties-->
+ <PropertyGroup>
+ <UserIncludes>$(UserIncludes);
+ $(Clrbase)\src\Debug\EE;
+ $(Clrbase)\src\vm;
+ $(Clrbase)\src\vm\$(TargetCpu);
+ $(Clrbase)\src\Debug\inc;
+ $(Clrbase)\src\Debug\inc\$(TargetCpu);
+ $(Clrbase)\src\Debug\inc\dump;
+ $(VCToolsIncPath);
+ $(Clrbase)\src\strongname\inc</UserIncludes>
+ <ClAdditionalOptions>$(ClAdditionalOptions) -DUNICODE -D_UNICODE -DFEATURE_NO_HOST</ClAdditionalOptions>
+ <PCHHeader Condition="'$(CCOVER)' == ''">stdafx.h</PCHHeader>
+ <EnableCxxPCHHeaders Condition="'$(CCOVER)' == ''">true</EnableCxxPCHHeaders>
+ <!--PCH: Both precompiled header and cpp are on the same ..\ path this is likely to be wrong.-->
+ <PCHCompile Condition="'$(CCOVER)' == ''">$(Clrbase)\src\Debug\EE\stdafx.cpp</PCHCompile>
+ </PropertyGroup>
+ <!--Leaf Project Items-->
+ <ItemGroup>
+ <CppCompile Include="$(Clrbase)\src\Debug\EE\controller.cpp" />
+ <CppCompile Include="$(Clrbase)\src\Debug\EE\Debugger.cpp" />
+ <CppCompile Include="$(Clrbase)\src\Debug\EE\DebuggerModule.cpp" />
+ <CppCompile Include="$(Clrbase)\src\Debug\EE\functioninfo.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <SourcesNodac Include="$(Clrbase)\src\Debug\EE\funceval.cpp" />
+ <SourcesNodac Include="$(Clrbase)\src\Debug\EE\RCThread.cpp" />
+ <SourcesNodac Include="$(Clrbase)\src\Debug\EE\Canary.cpp" />
+ <SourcesNodac Include="$(Clrbase)\src\Debug\EE\shared.cpp" />
+ <SourcesNodac Include="$(Clrbase)\src\Debug\EE\frameinfo.cpp" />
+ <SourcesNodac Include="$(Clrbase)\src\Debug\EE\InProcDac.cpp" />
+ <SourcesNodac Include="$(Clrbase)\src\Debug\EE\DbgTransportProxy.cpp" />
+ <SourcesNodac Include="$(Clrbase)\src\Debug\EE\ddunpack.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <I386Sources Condition="'$(TargetArch)' == 'i386'" Include="$(Clrbase)\src\Debug\EE\i386\x86walker.cpp" />
+ <I386Sources Condition="'$(TargetArch)' == 'i386'" Include="$(Clrbase)\src\Debug\EE\i386\primitives.cpp" />
+ <I386Sources Condition="'$(TargetArch)' == 'i386'" Include="$(Clrbase)\src\Debug\EE\i386\DebuggerRegDisplayHelper.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <Amd64Sources Condition="'$(TargetArch)' == 'amd64'" Include="$(Clrbase)\src\Debug\EE\amd64\primitives.cpp" />
+ <Amd64Sources Condition="'$(TargetArch)' == 'amd64'" Include="$(Clrbase)\src\Debug\EE\amd64\Amd64walker.cpp" />
+ <Amd64Sources Condition="'$(TargetArch)' == 'amd64'" Include="$(Clrbase)\src\Debug\EE\amd64\DebuggerRegDisplayHelper.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+    <ArmSources Condition="'$(TargetArch)' == 'arm'" Include="$(Clrbase)\src\Debug\EE\arm\primitives.cpp" />
+ <ArmSources Condition="'$(TargetArch)' == 'arm'" Include="$(Clrbase)\src\Debug\EE\arm\ArmWalker.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <Arm64Sources Condition="'$(TargetArch)' == 'arm64'" Include="$(Clrbase)\src\Debug\EE\arm64\primitives.cpp" />
+ <Arm64Sources Condition="'$(TargetArch)' == 'arm64'" Include="$(Clrbase)\src\Debug\EE\arm64\Arm64Walker.cpp" />
+ </ItemGroup>
+
+ <!--Import the targets-->
+</Project>
diff --git a/src/debug/ee/amd64/.gitmirror b/src/debug/ee/amd64/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/debug/ee/amd64/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror.
\ No newline at end of file
diff --git a/src/debug/ee/amd64/amd64walker.cpp b/src/debug/ee/amd64/amd64walker.cpp
new file mode 100644
index 0000000000..06d8aeafcf
--- /dev/null
+++ b/src/debug/ee/amd64/amd64walker.cpp
@@ -0,0 +1,1182 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: Amd64walker.cpp
+//
+
+//
+// AMD64 instruction decoding/stepping logic
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+
+#include "walker.h"
+
+#include "frames.h"
+#include "openum.h"
+
+#ifdef _TARGET_AMD64_
+
+//
+// The AMD64 walker is currently pretty minimal. It only recognizes call and return opcodes, plus a few jumps. The rest
+// is treated as unknown.
+//
+void NativeWalker::Decode()
+{
+ const BYTE *ip = m_ip;
+
+ m_type = WALK_UNKNOWN;
+ m_skipIP = NULL;
+ m_nextIP = NULL;
+
+ BYTE rex = NULL;
+
+ LOG((LF_CORDB, LL_INFO100000, "NW:Decode: m_ip 0x%x\n", m_ip));
+
+ BYTE prefix = *ip;
+ if (prefix == 0xcc)
+ {
+ prefix = (BYTE)DebuggerController::GetPatchedOpcode(m_ip);
+ LOG((LF_CORDB, LL_INFO100000, "NW:Decode 1st byte was patched, might have been prefix\n"));
+ }
+
+ //
+ // Skip instruction prefixes
+ //
+ do
+ {
+ switch (prefix)
+ {
+ // Segment overrides
+ case 0x26: // ES
+ case 0x2E: // CS
+ case 0x36: // SS
+ case 0x3E: // DS
+ case 0x64: // FS
+ case 0x65: // GS
+
+ // Size overrides
+ case 0x66: // Operand-Size
+ case 0x67: // Address-Size
+
+ // Lock
+ case 0xf0:
+
+ // String REP prefixes
+ case 0xf2: // REPNE/REPNZ
+ case 0xf3:
+ LOG((LF_CORDB, LL_INFO10000, "NW:Decode: prefix:%0.2x ", prefix));
+ ip++;
+ continue;
+
+ // REX register extension prefixes
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4a:
+ case 0x4b:
+ case 0x4c:
+ case 0x4d:
+ case 0x4e:
+ case 0x4f:
+ LOG((LF_CORDB, LL_INFO10000, "NW:Decode: REX prefix:%0.2x ", prefix));
+ // make sure to set rex to prefix, not *ip because *ip still represents the
+ // codestream which has a 0xcc in it.
+ rex = prefix;
+ ip++;
+ continue;
+
+ default:
+ break;
+ }
+ } while (0);
+
+ // Read the opcode
+ m_opcode = *ip++;
+
+ LOG((LF_CORDB, LL_INFO100000, "NW:Decode: ip 0x%x, m_opcode:%0.2x\n", ip, m_opcode));
+
+    // Don't remove this. The prefix check above didn't modify the codestream, so if there was
+    // no prefix, m_opcode was read straight from the patched code stream and may still be the
+    // break opcode (0xcc); look up the original opcode in that case.
+ if (m_opcode == 0xcc)
+ {
+ m_opcode = (BYTE)DebuggerController::GetPatchedOpcode(m_ip);
+ LOG((LF_CORDB, LL_INFO100000, "NW:Decode after patch look up: m_opcode:%0.2x\n", m_opcode));
+ }
+
+ // Setup rex bits if needed
+ BYTE rex_b = 0;
+ BYTE rex_x = 0;
+ BYTE rex_r = 0;
+
+ if (rex != NULL)
+ {
+ rex_b = (rex & 0x1); // high bit to modrm r/m field or SIB base field or OPCODE reg field -- Hmm, when which?
+ rex_x = (rex & 0x2) >> 1; // high bit to sib index field
+ rex_r = (rex & 0x4) >> 2; // high bit to modrm reg field
+ }
+
+ // Analyze what we can of the opcode
+ switch (m_opcode)
+ {
+ case 0xff:
+ {
+
+ BYTE modrm = *ip++;
+
+ _ASSERT(modrm != NULL);
+
+ BYTE mod = (modrm & 0xC0) >> 6;
+ BYTE reg = (modrm & 0x38) >> 3;
+ BYTE rm = (modrm & 0x07);
+
+ reg |= (rex_r << 3);
+ rm |= (rex_b << 3);
+
+ if ((reg < 2) || (reg > 5 && reg < 8) || (reg > 15)) {
+ // not a valid register for a CALL or BRANCH
+ return;
+ }
+
+ BYTE *result;
+ WORD displace;
+
+ // See: Tables A-15,16,17 in AMD Dev Manual 3 for information
+ // about how the ModRM/SIB/REX bytes interact.
+
+ switch (mod)
+ {
+ case 0:
+ case 1:
+ case 2:
+ if ((rm & 0x07) == 4) // we have an SIB byte following
+ {
+ //
+ // Get values from the SIB byte
+ //
+ BYTE sib = *ip;
+
+ _ASSERT(sib != NULL);
+
+ BYTE ss = (sib & 0xC0) >> 6;
+ BYTE index = (sib & 0x38) >> 3;
+ BYTE base = (sib & 0x07);
+
+ index |= (rex_x << 3);
+ base |= (rex_b << 3);
+
+ ip++;
+
+ //
+ // Get starting value
+ //
+ if ((mod == 0) && ((base & 0x07) == 5))
+ {
+ result = 0;
+ }
+ else
+ {
+ result = (BYTE *)(size_t)GetRegisterValue(base);
+ }
+
+ //
+ // Add in the [index]
+ //
+ if (index != 0x4)
+ {
+ result = result + (GetRegisterValue(index) << ss);
+ }
+
+ //
+ // Finally add in the offset
+ //
+ if (mod == 0)
+ {
+ if ((base & 0x07) == 5)
+ {
+ result = result + *((UINT32*)ip);
+ displace = 7;
+ }
+ else
+ {
+ displace = 3;
+ }
+ }
+ else if (mod == 1)
+ {
+ result = result + *((UINT8*)ip);
+ displace = 4;
+ }
+ else // mod == 2
+ {
+ result = result + *((UINT32*)ip);
+ displace = 7;
+ }
+
+ }
+ else
+ {
+ //
+ // Get the value we need from the register.
+ //
+
+ // Check for RIP-relative addressing mode.
+ if ((mod == 0) && ((rm & 0x07) == 5))
+ {
+ displace = 6; // 1 byte opcode + 1 byte modrm + 4 byte displacement (signed)
+ result = const_cast<BYTE *>(m_ip) + displace + *(reinterpret_cast<const INT32*>(ip));
+ }
+ else
+ {
+ result = (BYTE *)GetRegisterValue(rm);
+
+ if (mod == 0)
+ {
+ displace = 2;
+ }
+ else if (mod == 1)
+ {
+ result = result + *((UINT8*)ip);
+ displace = 3;
+ }
+ else // mod == 2
+ {
+ result = result + *((UINT32*)ip);
+ displace = 6;
+ }
+ }
+ }
+
+ //
+ // Now dereference thru the result to get the resulting IP.
+ //
+ result = (BYTE *)(*((UINT64*)result));
+
+ break;
+
+ case 3:
+ default:
+ // The operand is stored in a register.
+ result = (BYTE *)GetRegisterValue(rm);
+ displace = 2;
+
+ break;
+
+ }
+
+ // the instruction uses r8-r15, add in the extra byte to the displacement
+ // for the REX prefix which was used to specify the extended register
+ if (rex != NULL)
+ {
+ displace++;
+ }
+
+ // because we already checked register validity for CALL/BRANCH
+ // instructions above we can assume that there is no other option
+ if ((reg == 4) || (reg == 5))
+ {
+ m_type = WALK_BRANCH;
+ }
+ else
+ {
+ m_type = WALK_CALL;
+ }
+ m_nextIP = result;
+ m_skipIP = m_ip + displace;
+ break;
+ }
+ case 0xe8:
+ {
+ m_type = WALK_CALL;
+
+        // Sign-extend the displacement if necessary.
+ INT32 disp = *((INT32*)ip);
+ m_nextIP = ip + 4 + (disp < 0 ? (disp | 0xffffffff00000000) : disp);
+ m_skipIP = ip + 4;
+
+ break;
+ }
+ case 0xe9:
+ {
+ m_type = WALK_BRANCH;
+
+        // Sign-extend the displacement if necessary.
+ INT32 disp = *((INT32*)ip);
+ m_nextIP = ip + 4 + (disp < 0 ? (disp | 0xffffffff00000000) : disp);
+ m_skipIP = ip + 4;
+
+ break;
+ }
+ case 0xc2:
+ case 0xc3:
+ case 0xca:
+ case 0xcb:
+ {
+ m_type = WALK_RETURN;
+ break;
+ }
+ default:
+ break;
+ }
+}
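
The 0xE8/0xE9 cases above compute the branch target from a signed 32-bit displacement that is relative to the end of the 5-byte instruction. The self-contained sketch below is an editorial illustration (not part of the commit) that restates the same computation; reading the bytes into an int32_t makes the explicit sign-extension step above unnecessary, because pointer arithmetic with a signed offset already extends correctly.

// Self-contained restatement of the rel32 target computation (illustration only).
#include <cstdint>
#include <cstdio>
#include <cstring>

static const uint8_t* RelativeTarget(const uint8_t* afterOpcode)
{
    int32_t disp;
    memcpy(&disp, afterOpcode, sizeof(disp));   // 4-byte little-endian displacement
    return afterOpcode + sizeof(disp) + disp;   // relative to the end of the instruction
}

int main()
{
    // e8 f6 ff ff ff  =>  call with displacement -10
    const uint8_t code[] = { 0xe8, 0xf6, 0xff, 0xff, 0xff };
    const uint8_t* target = RelativeTarget(code + 1);
    printf("target is %td bytes before the end of the instruction\n", (code + 5) - target);
    return 0;
}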
+
+
+//
+// Given a regdisplay and a register number, return the value of the register.
+//
+
+UINT64 NativeWalker::GetRegisterValue(int registerNumber)
+{
+ if (m_registers == NULL) {
+ return 0;
+ }
+
+ switch (registerNumber)
+ {
+ case 0:
+ return m_registers->pCurrentContext->Rax;
+ break;
+ case 1:
+ return m_registers->pCurrentContext->Rcx;
+ break;
+ case 2:
+ return m_registers->pCurrentContext->Rdx;
+ break;
+ case 3:
+ return m_registers->pCurrentContext->Rbx;
+ break;
+ case 4:
+ return m_registers->pCurrentContext->Rsp;
+ break;
+ case 5:
+ return m_registers->pCurrentContext->Rbp;
+ break;
+ case 6:
+ return m_registers->pCurrentContext->Rsi;
+ break;
+ case 7:
+ return m_registers->pCurrentContext->Rdi;
+ break;
+ case 8:
+ return m_registers->pCurrentContext->R8;
+ break;
+ case 9:
+ return m_registers->pCurrentContext->R9;
+ break;
+ case 10:
+ return m_registers->pCurrentContext->R10;
+ break;
+ case 11:
+ return m_registers->pCurrentContext->R11;
+ break;
+ case 12:
+ return m_registers->pCurrentContext->R12;
+ break;
+ case 13:
+ return m_registers->pCurrentContext->R13;
+ break;
+ case 14:
+ return m_registers->pCurrentContext->R14;
+ break;
+ case 15:
+ return m_registers->pCurrentContext->R15;
+ break;
+ default:
+ _ASSERTE(!"Invalid register number!");
+ }
+
+ return 0;
+}
+
+
+// mod reg r/m
+// bits 7-6 5-3 2-0
+struct ModRMByte
+{
+ BYTE rm :3;
+ BYTE reg:3;
+ BYTE mod:2;
+};
+
+// fixed W R X B
+// bits 7-4 3 2 1 0
+struct RexByte
+{
+ BYTE b:1;
+ BYTE x:1;
+ BYTE r:1;
+ BYTE w:1;
+ BYTE fixed:4;
+};
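
These two structs mirror the ModRM and REX bit layouts described in the comments. A quick way to sanity-check the layouts is the standalone sketch below; it assumes the low-bit-first bitfield allocation that MSVC and GCC use on x86-64, and the *Sketch type names are illustrative duplicates added only so the example compiles on its own.

// Standalone sanity check of the bit layouts above (illustration only).
#include <cstdio>
#include <cstring>

struct ModRMByteSketch { unsigned char rm : 3, reg : 3, mod : 2; };
struct RexByteSketch   { unsigned char b : 1, x : 1, r : 1, w : 1, fixed : 4; };

int main()
{
    unsigned char raw = 0x95;              // 10 010 101b
    ModRMByteSketch m;
    memcpy(&m, &raw, sizeof(m));
    printf("mod=%u reg=%u rm=%u\n", (unsigned)m.mod, (unsigned)m.reg, (unsigned)m.rm);  // 2 2 5

    unsigned char rexRaw = 0x48;           // REX.W, the common 64-bit operand-size prefix
    RexByteSketch rex;
    memcpy(&rex, &rexRaw, sizeof(rex));
    printf("fixed=0x%x w=%u\n", (unsigned)rex.fixed, (unsigned)rex.w);                  // 0x4 1
    return 0;
}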
+
+// static
+void NativeWalker::DecodeInstructionForPatchSkip(const BYTE *address, InstructionAttribute * pInstrAttrib)
+{
+ //
+ // Skip instruction prefixes
+ //
+
+ LOG((LF_CORDB, LL_INFO10000, "Patch decode: "));
+
+    if (pInstrAttrib == NULL)
+    {
+        return;
+    }
+
+    // For reads and writes where the destination is a RIP-relative address, pInstrAttrib->m_cOperandSize will contain the size in bytes of the pointee; in all other
+    // cases it will be zero. If the RIP-relative address is being written to, then pInstrAttrib->m_fIsWrite will be true; in all other cases it will be false.
+    // Similar to cbImmedSize, in some cases we'll set pInstrAttrib->m_cOperandSize to 0x3, meaning that the prefix will determine the size if one is specified.
+    pInstrAttrib->m_cOperandSize = 0;
+    pInstrAttrib->m_fIsWrite = false;
+
+ // These three legacy prefixes are used to modify some of the two-byte opcodes.
+ bool fPrefix66 = false;
+ bool fPrefixF2 = false;
+ bool fPrefixF3 = false;
+
+ bool fRex = false;
+ bool fModRM = false;
+
+ RexByte rex = {0};
+ ModRMByte modrm = {0};
+
+ // We use 0x3 to indicate that we need to look at the operand-size override and the rex byte
+ // to determine whether the immediate size is 2 bytes or 4 bytes.
+ BYTE cbImmedSize = 0;
+
+ const BYTE* originalAddr = address;
+
+ do
+ {
+ switch (*address)
+ {
+ // Operand-Size override
+ case 0x66:
+ fPrefix66 = true;
+ goto LLegacyPrefix;
+
+        // Repeat (REPNE/REPNZ)
+ case 0xf2:
+ fPrefixF2 = true;
+ goto LLegacyPrefix;
+
+        // Repeat (REP/REPE/REPZ)
+ case 0xf3:
+ fPrefixF3 = true;
+ goto LLegacyPrefix;
+
+ // Address-Size override
+ case 0x67: // fall through
+
+ // Segment overrides
+ case 0x26: // ES
+ case 0x2E: // CS
+ case 0x36: // SS
+ case 0x3E: // DS
+ case 0x64: // FS
+ case 0x65: // GS // fall through
+
+ // Lock
+ case 0xf0:
+LLegacyPrefix:
+ LOG((LF_CORDB, LL_INFO10000, "prefix:%0.2x ", *address));
+ address++;
+ continue;
+
+ // REX register extension prefixes
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x44:
+ case 0x45:
+ case 0x46:
+ case 0x47:
+ case 0x48:
+ case 0x49:
+ case 0x4a:
+ case 0x4b:
+ case 0x4c:
+ case 0x4d:
+ case 0x4e:
+ case 0x4f:
+ LOG((LF_CORDB, LL_INFO10000, "prefix:%0.2x ", *address));
+ fRex = true;
+ rex = *(RexByte*)address;
+ address++;
+ continue;
+
+ default:
+ break;
+ }
+ } while (0);
+
+ pInstrAttrib->Reset();
+
+ BYTE opcode0 = *address;
+ BYTE opcode1 = *(address + 1); // this is only valid if the first opcode byte is 0x0F
+
+ // Handle AVX encodings. Note that these can mostly be handled as if they are aliases
+ // for a corresponding SSE encoding.
+ // See Figure 2-9 in "Intel 64 and IA-32 Architectures Software Developer's Manual".
+
+ if (opcode0 == 0xC4 || opcode0 == 0xC5)
+ {
+ BYTE pp;
+ if (opcode0 == 0xC4)
+ {
+ BYTE opcode2 = *(address + 2);
+ address++;
+
+ // REX bits are encoded in inverted form.
+ // R,X, and B are the top bits (in that order) of opcode1.
+ // W is the top bit of opcode2.
+            if ((opcode1 & 0x80) == 0)
+            {
+                rex.r = 1;
+                fRex = true;
+            }
+ if ((opcode1 & 0x40) == 0)
+ {
+ rex.x = 1;
+ fRex = true;
+ }
+ if ((opcode1 & 0x20) == 0)
+ {
+ rex.b = 1;
+ fRex = true;
+ }
+ if ((opcode2 & 0x80) != 0)
+ {
+ rex.w = 1;
+ fRex = true;
+ }
+
+ pp = opcode2 & 0x3;
+
+ BYTE mmBits = opcode1 & 0x1f;
+ BYTE impliedOpcode1 = 0;
+ switch(mmBits)
+ {
+ case 1: break; // No implied leading byte.
+ case 2: impliedOpcode1 = 0x38; break;
+ case 3: impliedOpcode1 = 0x3A; break;
+ default: _ASSERTE(!"NW::DIFPS - invalid opcode"); break;
+ }
+
+ if (impliedOpcode1 != 0)
+ {
+ opcode1 = impliedOpcode1;
+ }
+ else
+ {
+ opcode1 = *address;
+ address++;
+ }
+ }
+ else
+ {
+ pp = opcode1 & 0x3;
+ if ((opcode1 & 0x80) == 0)
+ {
+ // The two-byte VEX encoding only encodes the 'R' bit.
+ fRex = true;
+ rex.r = 1;
+ }
+ opcode1 = *address;
+ address++;
+ }
+ opcode0 = 0x0f;
+ switch (pp)
+ {
+ case 1: fPrefix66 = true; break;
+ case 2: fPrefixF3 = true; break;
+ case 3: fPrefixF2 = true; break;
+ }
+ }
+
+ // The following opcode decoding follows the tables in "Appendix A Opcode and Operand Encodings" of
+ // "AMD64 Architecture Programmer's Manual Volume 3"
+
+ // one-byte opcodes
+ if (opcode0 != 0x0F)
+ {
+ BYTE highNibble = (opcode0 & 0xF0) >> 4;
+ BYTE lowNibble = (opcode0 & 0x0F);
+
+ switch (highNibble)
+ {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ case 0x3:
+ if ((lowNibble == 0x6) || (lowNibble == 0x7) || (lowNibble == 0xE) || (lowNibble == 0xF))
+ {
+ _ASSERTE(!"NW::DIFPS - invalid opcode");
+ }
+
+ // CMP
+ if ( (lowNibble <= 0x3) ||
+ ((lowNibble >= 0x8) && (lowNibble <= 0xB)) )
+ {
+ fModRM = true;
+ }
+
+ // ADD/XOR reg/mem, reg
+ if (lowNibble == 0x0)
+ {
+ pInstrAttrib->m_cOperandSize = 0x1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x1)
+ {
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // XOR reg, reg/mem
+ else if (lowNibble == 0x2)
+ {
+ pInstrAttrib->m_cOperandSize = 0x1;
+ }
+ else if (lowNibble == 0x3)
+ {
+ pInstrAttrib->m_cOperandSize = 0x3;
+ }
+
+ break;
+
+ case 0x4:
+ case 0x5:
+ break;
+
+ case 0x6:
+ // IMUL
+ if (lowNibble == 0x9)
+ {
+ fModRM = true;
+ cbImmedSize = 0x3;
+ }
+ else if (lowNibble == 0xB)
+ {
+ fModRM = true;
+ cbImmedSize = 0x1;
+ }
+ else if (lowNibble == 0x3)
+ {
+ if (fRex)
+ {
+ // MOVSXD
+ fModRM = true;
+ }
+ }
+ break;
+
+ case 0x7:
+ break;
+
+ case 0x8:
+ fModRM = true;
+
+ // Group 1: lowNibble in [0x0, 0x3]
+ _ASSERTE(lowNibble != 0x2);
+
+ // ADD/XOR reg/mem, imm
+ if (lowNibble == 0x0)
+ {
+ cbImmedSize = 1;
+ pInstrAttrib->m_cOperandSize = 1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x1)
+ {
+ cbImmedSize = 3;
+ pInstrAttrib->m_cOperandSize = 3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x3)
+ {
+ cbImmedSize = 1;
+ pInstrAttrib->m_cOperandSize = 3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // MOV reg/mem, reg
+ else if (lowNibble == 0x8)
+ {
+ pInstrAttrib->m_cOperandSize = 0x1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x9)
+ {
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // MOV reg, reg/mem
+ else if (lowNibble == 0xA)
+ {
+ pInstrAttrib->m_cOperandSize = 0x1;
+ }
+ else if (lowNibble == 0xB)
+ {
+ pInstrAttrib->m_cOperandSize = 0x3;
+ }
+
+ break;
+
+ case 0x9:
+ case 0xA:
+ case 0xB:
+ break;
+
+ case 0xC:
+ if ((lowNibble == 0x4) || (lowNibble == 0x5) || (lowNibble == 0xE))
+ {
+ _ASSERTE(!"NW::DIFPS - invalid opcode");
+ }
+
+ // RET
+            if ((lowNibble == 0x2) || (lowNibble == 0x3))
+ {
+ break;
+ }
+
+ // Group 2 (part 1): lowNibble in [0x0, 0x1]
+ // RCL reg/mem, imm
+ if (lowNibble == 0x0)
+ {
+ fModRM = true;
+ cbImmedSize = 0x1;
+ pInstrAttrib->m_cOperandSize = 0x1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x1)
+ {
+ fModRM = true;
+ cbImmedSize = 0x1;
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // Group 11: lowNibble in [0x6, 0x7]
+ // MOV reg/mem, imm
+ else if (lowNibble == 0x6)
+ {
+ fModRM = true;
+ cbImmedSize = 1;
+ pInstrAttrib->m_cOperandSize = 1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x7)
+ {
+ fModRM = true;
+ cbImmedSize = 3;
+ pInstrAttrib->m_cOperandSize = 3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ break;
+
+ case 0xD:
+ // Group 2 (part 2): lowNibble in [0x0, 0x3]
+ // RCL reg/mem, 1/reg
+ if (lowNibble == 0x0 || lowNibble == 0x2)
+ {
+ fModRM = true;
+ pInstrAttrib->m_cOperandSize = 0x1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x1 || lowNibble == 0x3)
+ {
+ fModRM = true;
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+
+ // x87 instructions: lowNibble in [0x8, 0xF]
+ // - the entire ModRM byte is used to modify the opcode,
+ // so the ModRM byte cannot be used in RIP-relative addressing
+ break;
+
+ case 0xE:
+ break;
+
+ case 0xF:
+ // Group 3: lowNibble in [0x6, 0x7]
+ // TEST
+ if ((lowNibble == 0x6) || (lowNibble == 0x7))
+ {
+ fModRM = true;
+
+ modrm = *(ModRMByte*)(address + 1);
+ if ((modrm.reg == 0x0) || (modrm.reg == 0x1))
+ {
+ if (lowNibble == 0x6)
+ {
+ cbImmedSize = 0x1;
+ }
+ else
+ {
+ cbImmedSize = 0x3;
+ }
+ }
+ }
+ // Group 4: lowNibble == 0xE
+ // INC reg/mem
+ else if (lowNibble == 0xE)
+ {
+ fModRM = true;
+ pInstrAttrib->m_cOperandSize = 1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // Group 5: lowNibble == 0xF
+ else if (lowNibble == 0xF)
+ {
+ fModRM = true;
+ pInstrAttrib->m_cOperandSize = 3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ break;
+ }
+
+ address += 1;
+ if (fModRM)
+ {
+ modrm = *(ModRMByte*)address;
+ address += 1;
+ }
+ }
+ // two-byte opcodes
+ else
+ {
+ BYTE highNibble = (opcode1 & 0xF0) >> 4;
+ BYTE lowNibble = (opcode1 & 0x0F);
+
+ switch (highNibble)
+ {
+ case 0x0:
+ // Group 6: lowNibble == 0x0
+ if (lowNibble == 0x0)
+ {
+ fModRM = true;
+ }
+ // Group 7: lowNibble == 0x1
+ else if (lowNibble == 0x1)
+ {
+ fModRM = true;
+ }
+ else if ((lowNibble == 0x2) || (lowNibble == 0x3))
+ {
+ fModRM = true;
+ }
+ // Group p: lowNibble == 0xD
+ else if (lowNibble == 0xD)
+ {
+ fModRM = true;
+ }
+ // 3DNow! instructions: lowNibble == 0xF
+ // - all 3DNow! instructions use the ModRM byte
+ else if (lowNibble == 0xF)
+ {
+ fModRM = true;
+ cbImmedSize = 0x1;
+ }
+ break;
+
+ case 0x1: // Group 16: lowNibble == 0x8
+ // MOVSS xmm, xmm/mem (low nibble 0x0)
+ // MOVSS xmm/mem, xmm (low nibble 0x1)
+ if (lowNibble <= 0x1)
+ {
+ fModRM = true;
+ if (fPrefixF2 || fPrefixF3)
+ pInstrAttrib->m_cOperandSize = 0x8;
+ else
+ pInstrAttrib->m_cOperandSize = 0x10;
+
+ if (lowNibble == 0x1)
+ pInstrAttrib->m_fIsWrite = true;
+
+ break;
+ }
+ case 0x2: // fall through
+ fModRM = true;
+ if (lowNibble == 0x8 || lowNibble == 0x9)
+ {
+ pInstrAttrib->m_cOperandSize = 0x10;
+
+ if (lowNibble == 0x9)
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ break;
+
+ case 0x3:
+ break;
+
+ case 0x4:
+ case 0x5:
+ case 0x6: // fall through
+ fModRM = true;
+ break;
+
+ case 0x7:
+ if (lowNibble == 0x0)
+ {
+ fModRM = true;
+ cbImmedSize = 0x1;
+ }
+ else if ((lowNibble >= 0x1) && (lowNibble <= 0x3))
+ {
+ _ASSERTE(!fPrefixF2 && !fPrefixF3);
+
+ // Group 12: lowNibble == 0x1
+ // Group 13: lowNibble == 0x2
+ // Group 14: lowNibble == 0x3
+ fModRM = true;
+ cbImmedSize = 0x1;
+ }
+ else if ((lowNibble >= 0x4) && (lowNibble <= 0x6))
+ {
+ fModRM = true;
+ }
+ // MOVD reg/mem, mmx for 0F 7E
+ else if ((lowNibble == 0xE) || (lowNibble == 0xF))
+ {
+ _ASSERTE(!fPrefixF2);
+
+ fModRM = true;
+ }
+ break;
+
+ case 0x8:
+ break;
+
+ case 0x9:
+ fModRM = true;
+ break;
+
+ case 0xA:
+ if ((lowNibble >= 0x3) && (lowNibble <= 0x5))
+ {
+ // BT reg/mem, reg
+ fModRM = true;
+ if (lowNibble == 0x3)
+ {
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // SHLD reg/mem, imm
+ else if (lowNibble == 0x4)
+ {
+ cbImmedSize = 0x1;
+ }
+ }
+ else if (lowNibble >= 0xB)
+ {
+ fModRM = true;
+ // BTS reg/mem, reg
+ if (lowNibble == 0xB)
+ {
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // SHRD reg/mem, imm
+ else if (lowNibble == 0xC)
+ {
+ cbImmedSize = 0x1;
+ }
+ // Group 15: lowNibble == 0xE
+ }
+ break;
+
+ case 0xB:
+ // Group 10: lowNibble == 0x9
+ // - this entire group is invalid
+ _ASSERTE((lowNibble != 0x8) && (lowNibble != 0x9));
+
+ fModRM = true;
+ // CMPXCHG reg/mem, reg
+ if (lowNibble == 0x0)
+ {
+ pInstrAttrib->m_cOperandSize = 0x1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x1)
+ {
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // Group 8: lowNibble == 0xA
+ // BTS reg/mem, imm
+ else if (lowNibble == 0xA)
+ {
+ cbImmedSize = 0x1;
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ // MOVSX reg, reg/mem
+ else if (lowNibble == 0xE)
+ {
+ pInstrAttrib->m_cOperandSize = 1;
+ }
+ else if (lowNibble == 0xF)
+ {
+ pInstrAttrib->m_cOperandSize = 2;
+ }
+ break;
+
+ case 0xC:
+ if (lowNibble <= 0x7)
+ {
+ fModRM = true;
+ // XADD reg/mem, reg
+ if (lowNibble == 0x0)
+ {
+ pInstrAttrib->m_cOperandSize = 0x1;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if (lowNibble == 0x1)
+ {
+ pInstrAttrib->m_cOperandSize = 0x3;
+ pInstrAttrib->m_fIsWrite = true;
+ }
+ else if ( (lowNibble == 0x2) ||
+ ((lowNibble >= 0x4) && (lowNibble <= 0x6)) )
+ {
+ cbImmedSize = 0x1;
+ }
+ }
+ break;
+
+ case 0xD:
+ case 0xE:
+ case 0xF: // fall through
+ fModRM = true;
+ break;
+ }
+
+ address += 2;
+ if (fModRM)
+ {
+ modrm = *(ModRMByte*)address;
+ address += 1;
+ }
+ }
+
+ // Check for RIP-relative addressing
+ if (fModRM && (modrm.mod == 0x0) && (modrm.rm == 0x5))
+ {
+ // SIB byte cannot be present with RIP-relative addressing.
+
+ pInstrAttrib->m_dwOffsetToDisp = (DWORD)(address - originalAddr);
+ _ASSERTE(pInstrAttrib->m_dwOffsetToDisp <= MAX_INSTRUCTION_LENGTH);
+
+ // Add 4 to the address for the displacement.
+ address += 4;
+
+        // Further adjust the address by the size of the immediate (if any).
+ if (cbImmedSize == 0x3)
+ {
+            // The size of the immediate depends on the effective operand size:
+ // 2 bytes if the effective operand size is 16-bit, or
+ // 4 bytes if the effective operand size is 32- or 64-bit.
+ if (fPrefix66)
+ {
+ cbImmedSize = 0x2;
+ }
+ else
+ {
+ cbImmedSize = 0x4;
+ }
+ }
+ address += cbImmedSize;
+
+ // if this is a read or write to a RIP-relative address then update pInstrAttrib->m_cOperandSize with the size of the pointee.
+ if (pInstrAttrib->m_cOperandSize == 0x3)
+ {
+ if (fPrefix66)
+ pInstrAttrib->m_cOperandSize = 0x2; // WORD*
+ else
+ pInstrAttrib->m_cOperandSize = 0x4; // DWORD*
+
+ if (fRex && rex.w == 0x1)
+ {
+ _ASSERTE(pInstrAttrib->m_cOperandSize == 0x4);
+ pInstrAttrib->m_cOperandSize = 0x8; // QWORD*
+ }
+ }
+
+ pInstrAttrib->m_cbInstr = (DWORD)(address - originalAddr);
+ _ASSERTE(pInstrAttrib->m_cbInstr <= MAX_INSTRUCTION_LENGTH);
+ }
+ else
+ {
+ // not a RIP-relative address so set to default values
+ pInstrAttrib->m_cOperandSize = 0;
+ pInstrAttrib->m_fIsWrite = false;
+ }
+
+ //
+ // Look at opcode to tell if it's a call or an
+ // absolute branch.
+ //
+ switch (opcode0)
+ {
+ case 0xC2: // RET
+ case 0xC3: // RET N
+ pInstrAttrib->m_fIsAbsBranch = true;
+ LOG((LF_CORDB, LL_INFO10000, "ABS:%0.2x\n", opcode0));
+ break;
+
+ case 0xE8: // CALL relative
+ pInstrAttrib->m_fIsCall = true;
+ LOG((LF_CORDB, LL_INFO10000, "CALL REL:%0.2x\n", opcode0));
+ break;
+
+ case 0xC8: // ENTER
+ pInstrAttrib->m_fIsCall = true;
+ pInstrAttrib->m_fIsAbsBranch = true;
+ LOG((LF_CORDB, LL_INFO10000, "CALL ABS:%0.2x\n", opcode0));
+ break;
+
+ case 0xFF: // CALL/JMP modr/m
+ //
+ // Read opcode modifier from modr/m
+ //
+
+ _ASSERTE(fModRM);
+ switch (modrm.reg)
+ {
+ case 2:
+ case 3:
+ pInstrAttrib->m_fIsCall = true;
+ // fall through
+ case 4:
+ case 5:
+ pInstrAttrib->m_fIsAbsBranch = true;
+ }
+ LOG((LF_CORDB, LL_INFO10000, "CALL/JMP modr/m:%0.2x\n", opcode0));
+ break;
+
+ default:
+ LOG((LF_CORDB, LL_INFO10000, "NORMAL:%0.2x\n", opcode0));
+ }
+
+ if (pInstrAttrib->m_cOperandSize == 0x0)
+ {
+ // if an operand size wasn't computed (likely because the decoder didn't understand the instruction) then set
+ // the size to the max buffer size. this is a fall-back to the dev10 behavior and is applicable for reads only.
+ _ASSERTE(!pInstrAttrib->m_fIsWrite);
+ pInstrAttrib->m_cOperandSize = SharedPatchBypassBuffer::cbBufferBypass;
+ }
+}
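
DecodeInstructionForPatchSkip reports where a RIP-relative displacement sits inside the instruction (m_dwOffsetToDisp), the total instruction length (m_cbInstr), and, per the comments above, the pointee size and read/write direction (m_cOperandSize, m_fIsWrite). The sketch below shows one way such attributes can be used to rebase a rel32 displacement when the instruction is executed from a copy at a different address. It is a hedged illustration only: the function and parameter names are ours, and the actual patch-skip machinery in controller.cpp (which also shadows the pointee via a SharedPatchBypassBuffer) is more involved.

// Hedged illustration of applying m_dwOffsetToDisp / m_cbInstr when an instruction with a
// RIP-relative operand runs from a relocated copy (names here are ours, not CoreCLR's).
#include <cstdint>
#include <cstring>

void RebaseRipRelativeDisplacement(uint8_t*       copyAddr,     // relocated copy of the instruction
                                   const uint8_t* originalAddr, // where the instruction really lives
                                   uint32_t       offsetToDisp, // InstructionAttribute::m_dwOffsetToDisp
                                   uint32_t       cbInstr)      // InstructionAttribute::m_cbInstr
{
    int32_t disp;
    memcpy(&disp, copyAddr + offsetToDisp, sizeof(disp));

    // RIP-relative operands are relative to the end of the instruction, so recompute the
    // displacement against the end of the relocated copy.
    const uint8_t* target  = originalAddr + cbInstr + disp;
    int64_t        newDisp = target - (copyAddr + cbInstr);

    // A real implementation must verify that newDisp still fits in 32 bits.
    disp = (int32_t)newDisp;
    memcpy(copyAddr + offsetToDisp, &disp, sizeof(disp));
}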
+
+
+#endif
diff --git a/src/debug/ee/amd64/dbghelpers.S b/src/debug/ee/amd64/dbghelpers.S
new file mode 100644
index 0000000000..f652312175
--- /dev/null
+++ b/src/debug/ee/amd64/dbghelpers.S
@@ -0,0 +1,148 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+
+//extern FuncEvalHijackWorker:proc
+//extern FuncEvalHijackPersonalityRoutine:proc
+
+// @dbgtodo- once we port Funceval, use the ExceptionHijack stub instead of this func-eval stub.
+NESTED_ENTRY FuncEvalHijack, _TEXT, FuncEvalHijackPersonalityRoutine
+ // the stack should be aligned at this point, since we do not call this
+ // function explicitly
+ alloc_stack 20h
+ END_PROLOGUE
+
+ mov [rsp], rdi
+ call FuncEvalHijackWorker
+
+ //
+    // The following nop is crucial. It is important that the OS *not* recognize
+    // the instruction immediately following the call above as an epilogue; if it
+    // does, it unwinds this function itself rather than calling our personality
+    // routine to do the unwind, and then stack tracing is hosed.
+ //
+ nop
+
+ //
+ // epilogue
+ //
+ add rsp, 20h
+ TAILJMP_RAX
+NESTED_END FuncEvalHijack, _TEXT
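
As implied by the stub above — the incoming argument register (rdi here, rcx in the Windows .asm variant later in this diff) is spilled to the home slot, FuncEvalHijackWorker is called, and TAILJMP_RAX transfers control to whatever it returned — the C++ side of the contract looks roughly like the sketch below. The exact declaration lives in the debugger sources; this restatement is an assumption for illustration only.

// Sketch of the contract implied by FuncEvalHijack (illustrative; see the debugger
// sources for the real declaration).
struct DebuggerEval;   // opaque here; its address arrives in the first argument register

// Performs the queued func-eval and returns the address the stub should tail-jump to;
// TAILJMP_RAX in the stub transfers control to this return value.
extern "C" void* FuncEvalHijackWorker(DebuggerEval* pDE);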
+
+
+//extern ExceptionHijackWorker:proc
+//extern ExceptionHijackPersonalityRoutine:proc
+
+// This is the general purpose hijacking stub. The DacDbi Hijack primitive will
+// set up the stack and then set the IP here, and so this just makes the call.
+NESTED_ENTRY ExceptionHijack, _TEXT, ExceptionHijackPersonalityRoutine
+ // the stack should be aligned at this point, since we do not call this
+ // function explicitly
+ //
+ // There is a problem here. The Orcas assembler doesn't like a 0-sized stack frame.
+ // So we allocate 4 stack slots as the outgoing argument home and just copy the
+ // arguments set up by DacDbi into these stack slots. We will take a perf hit,
+ // but this is not a perf critical code path anyway.
+ alloc_stack 20h
+ END_PROLOGUE
+
+ // We used to do an "alloc_stack 0h" because the stack has been allocated for us
+ // by the OOP hijacking routine. Our arguments have also been pushed onto the
+ // stack for us. However, the Orcas compilers don't like a 0-sized frame, so
+ // we need to allocate something here and then just copy the stack arguments to
+ // their new argument homes.
+ mov rax, [rsp + 20h]
+ mov [rsp], rax
+ mov rax, [rsp + 28h]
+ mov [rsp + 8h], rax
+ mov rax, [rsp + 30h]
+ mov [rsp + 10h], rax
+ mov rax, [rsp + 38h]
+ mov [rsp + 18h], rax
+
+ // DD Hijack primitive already set the stack. So just make the call now.
+ call ExceptionHijackWorker
+
+ //
+    // The following nop is crucial. It is important that the OS *not* recognize
+    // the instruction immediately following the call above as an epilogue; if it
+    // does, it unwinds this function itself rather than calling our personality
+    // routine to do the unwind, and then stack tracing is hosed.
+ //
+ nop
+
+ // *** Should never get here ***
+ // Hijack should have restored itself first.
+ int 3
+
+ //
+ // epilogue
+ //
+ add rsp, 20h
+ TAILJMP_RAX
+
+// Put a label here to tell the debugger where the end of this function is.
+PATCH_LABEL ExceptionHijackEnd
+
+NESTED_END ExceptionHijack, _TEXT
+
+//
+// Flares for interop debugging.
+// Flares are exceptions (breakpoints) at well known addresses which the RS
+// listens for when interop debugging.
+//
+
+// This exception is from managed code.
+LEAF_ENTRY SignalHijackStartedFlare, _TEXT
+ int 3
+ // make sure that the basic block is unique
+ test rax,1
+ ret
+LEAF_END SignalHijackStartedFlare, _TEXT
+
+// Start the handoff
+LEAF_ENTRY ExceptionForRuntimeHandoffStartFlare, _TEXT
+ int 3
+ // make sure that the basic block is unique
+ test rax,2
+ ret
+LEAF_END ExceptionForRuntimeHandoffStartFlare, _TEXT
+
+// Finish the handoff.
+LEAF_ENTRY ExceptionForRuntimeHandoffCompleteFlare, _TEXT
+ int 3
+ // make sure that the basic block is unique
+ test rax,3
+ ret
+LEAF_END ExceptionForRuntimeHandoffCompleteFlare, _TEXT
+
+// Signal execution return to unhijacked state
+LEAF_ENTRY SignalHijackCompleteFlare, _TEXT
+ int 3
+ // make sure that the basic block is unique
+ test rax,4
+ ret
+LEAF_END SignalHijackCompleteFlare, _TEXT
+
+// This exception is from unmanaged code.
+LEAF_ENTRY ExceptionNotForRuntimeFlare, _TEXT
+ int 3
+ // make sure that the basic block is unique
+ test rax,5
+ ret
+LEAF_END ExceptionNotForRuntimeFlare, _TEXT
+
+// The Runtime is synchronized.
+LEAF_ENTRY NotifyRightSideOfSyncCompleteFlare, _TEXT
+ int 3
+ // make sure that the basic block is unique
+ test rax,6
+ ret
+LEAF_END NotifyRightSideOfSyncCompleteFlare, _TEXT
diff --git a/src/debug/ee/amd64/dbghelpers.asm b/src/debug/ee/amd64/dbghelpers.asm
new file mode 100644
index 0000000000..15199864bd
--- /dev/null
+++ b/src/debug/ee/amd64/dbghelpers.asm
@@ -0,0 +1,153 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+include AsmMacros.inc
+
+extern FuncEvalHijackWorker:proc
+extern FuncEvalHijackPersonalityRoutine:proc
+
+; @dbgtodo- once we port Funceval, use the ExceptionHijack stub instead of this func-eval stub.
+NESTED_ENTRY FuncEvalHijack, _TEXT, FuncEvalHijackPersonalityRoutine
+ ; the stack should be aligned at this point, since we do not call this
+ ; function explicitly
+ alloc_stack 20h
+ END_PROLOGUE
+
+ mov [rsp], rcx
+ call FuncEvalHijackWorker
+
+ ;
+    ; The following nop is crucial. It is important that the OS *not* recognize
+    ; the instruction immediately following the call above as an epilogue; if it
+    ; does, it unwinds this function itself rather than calling our personality
+    ; routine to do the unwind, and then stack tracing is hosed.
+ ;
+ nop
+
+ ;
+ ; epilogue
+ ;
+ add rsp, 20h
+ TAILJMP_RAX
+NESTED_END FuncEvalHijack, _TEXT
+
+
+
+extern ExceptionHijackWorker:proc
+extern ExceptionHijackPersonalityRoutine:proc
+
+; This is the general purpose hijacking stub. The DacDbi Hijack primitive will
+; set up the stack and then set the IP here, and so this just makes the call.
+NESTED_ENTRY ExceptionHijack, _TEXT, ExceptionHijackPersonalityRoutine
+ ; the stack should be aligned at this point, since we do not call this
+ ; function explicitly
+ ;
+ ; There is a problem here. The Orcas assembler doesn't like a 0-sized stack frame.
+ ; So we allocate 4 stack slots as the outgoing argument home and just copy the
+ ; arguments set up by DacDbi into these stack slots. We will take a perf hit,
+ ; but this is not a perf critical code path anyway.
+ alloc_stack 20h
+ END_PROLOGUE
+
+ ; We used to do an "alloc_stack 0h" because the stack has been allocated for us
+ ; by the OOP hijacking routine. Our arguments have also been pushed onto the
+ ; stack for us. However, the Orcas compilers don't like a 0-sized frame, so
+ ; we need to allocate something here and then just copy the stack arguments to
+ ; their new argument homes.
+ mov rax, [rsp + 20h]
+ mov [rsp], rax
+ mov rax, [rsp + 28h]
+ mov [rsp + 8h], rax
+ mov rax, [rsp + 30h]
+ mov [rsp + 10h], rax
+ mov rax, [rsp + 38h]
+ mov [rsp + 18h], rax
+
+ ; DD Hijack primitive already set the stack. So just make the call now.
+ call ExceptionHijackWorker
+
+ ;
+    ; The following nop is crucial. It is important that the OS *not* recognize
+    ; the instruction immediately following the call above as an epilogue; if it
+    ; does, it unwinds this function itself rather than calling our personality
+    ; routine to do the unwind, and then stack tracing is hosed.
+ ;
+ nop
+
+ ; *** Should never get here ***
+ ; Hijack should have restored itself first.
+ int 3
+
+ ;
+ ; epilogue
+ ;
+ add rsp, 20h
+ TAILJMP_RAX
+
+; Put a label here to tell the debugger where the end of this function is.
+PATCH_LABEL ExceptionHijackEnd
+
+NESTED_END ExceptionHijack, _TEXT
+
+;
+; Flares for interop debugging.
+; Flares are exceptions (breakpoints) at well known addresses which the RS
+; listens for when interop debugging.
+;
+
+; This exception is from managed code.
+LEAF_ENTRY SignalHijackStartedFlare, _TEXT
+ int 3
+ ; make sure that the basic block is unique
+ test rax,1
+ ret
+LEAF_END SignalHijackStartedFlare, _TEXT
+
+; Start the handoff
+LEAF_ENTRY ExceptionForRuntimeHandoffStartFlare, _TEXT
+ int 3
+ ; make sure that the basic block is unique
+ test rax,2
+ ret
+LEAF_END ExceptionForRuntimeHandoffStartFlare, _TEXT
+
+; Finish the handoff.
+LEAF_ENTRY ExceptionForRuntimeHandoffCompleteFlare, _TEXT
+ int 3
+ ; make sure that the basic block is unique
+ test rax,3
+ ret
+LEAF_END ExceptionForRuntimeHandoffCompleteFlare, _TEXT
+
+; Signal execution return to unhijacked state
+LEAF_ENTRY SignalHijackCompleteFlare, _TEXT
+ int 3
+ ; make sure that the basic block is unique
+ test rax,4
+ ret
+LEAF_END SignalHijackCompleteFlare, _TEXT
+
+; This exception is from unmanaged code.
+LEAF_ENTRY ExceptionNotForRuntimeFlare, _TEXT
+ int 3
+ ; make sure that the basic block is unique
+ test rax,5
+ ret
+LEAF_END ExceptionNotForRuntimeFlare, _TEXT
+
+; The Runtime is synchronized.
+LEAF_ENTRY NotifyRightSideOfSyncCompleteFlare, _TEXT
+ int 3
+ ; make sure that the basic block is unique
+ test rax,6
+ ret
+LEAF_END NotifyRightSideOfSyncCompleteFlare, _TEXT
+
+
+
+; This goes at the end of the assembly file
+ end
diff --git a/src/debug/ee/amd64/debuggerregdisplayhelper.cpp b/src/debug/ee/amd64/debuggerregdisplayhelper.cpp
new file mode 100644
index 0000000000..1b32706ac8
--- /dev/null
+++ b/src/debug/ee/amd64/debuggerregdisplayhelper.cpp
@@ -0,0 +1,42 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+/* ------------------------------------------------------------------------- *
+ * DebuggerRegDisplayHelper.cpp -- implementation of the platform-dependent
+ *                                 methods for transferring information
+ *                                 between REGDISPLAY and DebuggerREGDISPLAY
+ * ------------------------------------------------------------------------- */
+
+#include "stdafx.h"
+
+void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc)
+{
+ memcpy((BYTE*)pDst, (BYTE*)pSrc, sizeof(REGDISPLAY));
+
+ pDst->pContext = pSrc->pContext;
+
+ if (pSrc->pCurrentContextPointers == &(pSrc->ctxPtrsOne))
+ {
+ pDst->pCurrentContextPointers = &(pDst->ctxPtrsOne);
+ pDst->pCallerContextPointers = &(pDst->ctxPtrsTwo);
+ }
+ else
+ {
+ pDst->pCurrentContextPointers = &(pDst->ctxPtrsTwo);
+ pDst->pCallerContextPointers = &(pDst->ctxPtrsOne);
+ }
+
+ if (pSrc->pCurrentContext == &(pSrc->ctxOne))
+ {
+ pDst->pCurrentContext = &(pDst->ctxOne);
+ pDst->pCallerContext = &(pDst->ctxTwo);
+ }
+ else
+ {
+ pDst->pCurrentContext = &(pDst->ctxTwo);
+ pDst->pCallerContext = &(pDst->ctxOne);
+ }
+}
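
The pointer fix-ups after the memcpy are the whole point of this helper: REGDISPLAY holds pointers into its own embedded context structures, and a plain shallow copy would leave the destination pointing back into the source. The self-contained sketch below (illustrative layout, not the real REGDISPLAY) demonstrates the failure mode and the fix-up.

// Why the fix-up matters: a shallow copy of a struct containing pointers into itself
// leaves the copy aimed at the source object (illustration only).
#include <cassert>
#include <cstring>

struct SelfReferencing
{
    int  storage;
    int* pStorage;   // points at this object's own storage when correctly initialized
};

int main()
{
    SelfReferencing src = { 42, nullptr };
    src.pStorage = &src.storage;

    SelfReferencing dst;
    memcpy(&dst, &src, sizeof(dst));
    assert(dst.pStorage == &src.storage);  // still points into the source after memcpy

    dst.pStorage = &dst.storage;           // the CopyREGDISPLAY-style fix-up
    assert(dst.pStorage == &dst.storage);
    return 0;
}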
diff --git a/src/debug/ee/amd64/primitives.cpp b/src/debug/ee/amd64/primitives.cpp
new file mode 100644
index 0000000000..b8b24cb1e4
--- /dev/null
+++ b/src/debug/ee/amd64/primitives.cpp
@@ -0,0 +1,14 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+//
+
+
+#include "stdafx.h"
+
+#include "../../shared/amd64/primitives.cpp"
+
+
diff --git a/src/debug/ee/arm/.gitmirror b/src/debug/ee/arm/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/debug/ee/arm/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror.
\ No newline at end of file
diff --git a/src/debug/ee/arm/armwalker.cpp b/src/debug/ee/arm/armwalker.cpp
new file mode 100644
index 0000000000..d6a7df0d5e
--- /dev/null
+++ b/src/debug/ee/arm/armwalker.cpp
@@ -0,0 +1,408 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: armwalker.cpp
+//
+
+//
+// ARM instruction decoding/stepping logic
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+
+#include "walker.h"
+
+#include "frames.h"
+#include "openum.h"
+
+
+#ifdef _TARGET_ARM_
+
+void NativeWalker::Decode()
+{
+ // Set default next and skip instruction pointers.
+ m_nextIP = NULL;
+ m_skipIP = NULL;
+ m_type = WALK_UNKNOWN;
+
+ // We can't walk reliably without registers (because we need to know the IT state to determine whether or
+ // not the current instruction will be executed).
+ if (m_registers == NULL)
+ return;
+
+ // Determine whether we're executing in an IT block. If so, check the condition codes and IT state to see
+ // whether we'll execute the current instruction.
+ BYTE bITState = (BYTE)((BitExtract((WORD)m_registers->pCurrentContext->Cpsr, 15, 10) << 2) |
+ BitExtract((WORD)(m_registers->pCurrentContext->Cpsr >> 16), 10, 9));
+ if ((bITState & 0x1f) && !ConditionHolds(BitExtract(bITState, 7, 4)))
+ {
+ // We're in an IT block and the state is such that the current instruction is not scheduled to
+ // execute. Just return WALK_UNKNOWN so the caller will invoke single-step to update the register
+ // context correctly for the next instruction.
+
+ LOG((LF_CORDB, LL_INFO100000, "ArmWalker::Decode: IT block at %x\n", m_ip));
+ return;
+ }
+
+ // Fetch first word of the current instruction. From this we can determine if we've gotten the whole thing
+ // or we're dealing with a 32-bit instruction. If the current instruction is a break instruction, we'll
+ // need to check the patch table to get the correct instruction.
+ WORD opcode1 = CORDbgGetInstruction(m_ip);
+ PRD_TYPE unpatchedOpcode;
+ if (DebuggerController::CheckGetPatchedOpcode(m_ip, &unpatchedOpcode))
+ {
+ opcode1 = (WORD) unpatchedOpcode;
+ }
+
+
+ if (Is32BitInstruction(opcode1))
+ {
+ // Fetch second word of 32-bit instruction.
+ WORD opcode2 = CORDbgGetInstruction((BYTE*)((DWORD)m_ip) + 2);
+
+ LOG((LF_CORDB, LL_INFO100000, "ArmWalker::Decode 32bit instruction at %x, opcode: %x%x\n", m_ip, (DWORD)opcode1, (DWORD)opcode2));
+
+ // WALK_RETURN
+ if (((opcode1 & 0xffd0) == 0xe890) &&
+ ((opcode2 & 0x2000) == 0x0000))
+ {
+ // LDM.W : T2, POP.W : T2
+ DWORD registerList = opcode2;
+
+ if (registerList & 0x8000)
+ {
+ m_type = WALK_RETURN;
+ return;
+ }
+ }
+
+ // WALK_BRANCH
+ else if (((opcode1 & 0xf800) == 0xf000) &&
+ ((opcode2 & 0xd000) == 0x8000) &&
+ ((opcode1 & 0x0380) != 0x0380))
+ {
+ // B.W : T3
+ DWORD S = BitExtract(opcode1, 10, 10);
+ DWORD cond = BitExtract(opcode1, 9, 6);
+ DWORD imm6 = BitExtract(opcode1, 5, 0);
+ DWORD J1 = BitExtract(opcode2, 13, 13);
+ DWORD J2 = BitExtract(opcode2, 11, 11);
+ DWORD imm11 = BitExtract(opcode2, 10, 0);
+
+ if (ConditionHolds(cond))
+ {
+ DWORD disp = (S ? 0xfff00000 : 0) | (J2 << 19) | (J1 << 18) | (imm6 << 12) | (imm11 << 1);
+ m_nextIP = (BYTE*)((GetReg(15) + disp) | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ }
+ else if (((opcode1 & 0xf800) == 0xf000) &&
+ ((opcode2 & 0xd000) == 0x9000))
+ {
+ // B.W : T4
+ DWORD S = BitExtract(opcode1, 10, 10);
+ DWORD imm10 = BitExtract(opcode1, 9, 0);
+ DWORD J1 = BitExtract(opcode2, 13, 13);
+ DWORD J2 = BitExtract(opcode2, 11, 11);
+ DWORD imm11 = BitExtract(opcode2, 10, 0);
+
+ DWORD I1 = (J1 ^ S) ^ 1;
+ DWORD I2 = (J2 ^ S) ^ 1;
+
+ DWORD disp = (S ? 0xff000000 : 0) | (I1 << 23) | (I2 << 22) | (imm10 << 12) | (imm11 << 1);
+
+ m_nextIP = (BYTE*)((GetReg(15) + disp) | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ else if (((opcode1 & 0xfff0) == 0xf8d0) &&
+ ((opcode1 & 0x000f) != 0x000f))
+ {
+ // LDR.W (immediate): T3
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD imm12 = BitExtract(opcode2, 11, 0);
+
+ if (Rt == 15)
+ {
+ DWORD value = *(DWORD*)(GetReg(Rn) + imm12);
+
+ m_nextIP = (BYTE*)(value | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ }
+ else if (((opcode1 & 0xfff0) == 0xf850) &&
+ ((opcode2 & 0x0800) == 0x0800) &&
+ ((opcode1 & 0x000f) != 0x000f))
+ {
+ // LDR (immediate) : T4, POP : T3
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD P = BitExtract(opcode2, 10, 10);
+ DWORD U = BitExtract(opcode2, 9, 9);
+ DWORD imm8 = BitExtract(opcode2, 7, 0);
+
+ if (Rt == 15)
+ {
+ DWORD offset_addr = U ? GetReg(Rn) + imm8 : GetReg(Rn) - imm8;
+ DWORD addr = P ? offset_addr : GetReg(Rn);
+
+ DWORD value = *(DWORD*)addr;
+
+ m_nextIP = (BYTE*)(value | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ }
+ else if (((opcode1 & 0xff7f) == 0xf85f))
+ {
+ // LDR.W (literal) : T2
+ DWORD U = BitExtract(opcode1, 7, 7);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD imm12 = BitExtract(opcode2, 11, 0);
+
+ if (Rt == 15)
+ {
+ DWORD addr = GetReg(15) & ~3;
+ addr = U ? addr + imm12 : addr - imm12;
+
+ DWORD value = *(DWORD*)addr;
+
+ m_nextIP = (BYTE*)(value | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ }
+ else if (((opcode1 & 0xfff0) == 0xf850) &&
+ ((opcode2 & 0x0fc0) == 0x0000) &&
+ ((opcode1 & 0x000f) != 0x000f))
+ {
+ // LDR.W : T2
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD Rt = BitExtract(opcode2, 15, 12);
+ DWORD imm2 = BitExtract(opcode2, 5, 4);
+ DWORD Rm = BitExtract(opcode2, 3, 0);
+
+ if (Rt == 15)
+ {
+ DWORD addr = GetReg(Rn) + (GetReg(Rm) << imm2);
+
+ DWORD value = *(DWORD*)addr;
+
+ m_nextIP = (BYTE*)(value | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ }
+ else if (((opcode1 & 0xfff0) == 0xe8d0) &&
+ ((opcode2 & 0xffe0) == 0xf000))
+ {
+ // TBB/TBH : T1
+ DWORD Rn = BitExtract(opcode1, 3, 0);
+ DWORD H = BitExtract(opcode2, 4, 4);
+ DWORD Rm = BitExtract(opcode2, 3, 0);
+
+ DWORD addr = GetReg(Rn);
+
+ DWORD value;
+ if (H)
+ value = *(WORD*)(addr + (GetReg(Rm) << 1));
+ else
+ value = *(BYTE*)(addr + GetReg(Rm));
+
+ m_nextIP = (BYTE*)((GetReg(15) + (value << 1)) | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+
+ // WALK_CALL
+ else if (((opcode1 & 0xf800) == 0xf000) &&
+ ((opcode2 & 0xd000) == 0xd000))
+ {
+ // BL (immediate) : T1
+ DWORD S = BitExtract(opcode1, 10, 10);
+ DWORD imm10 = BitExtract(opcode1, 9, 0);
+ DWORD J1 = BitExtract(opcode2, 13, 13);
+ DWORD J2 = BitExtract(opcode2, 11, 11);
+ DWORD imm11 = BitExtract(opcode2, 10, 0);
+
+ DWORD I1 = (J1 ^ S) ^ 1;
+ DWORD I2 = (J2 ^ S) ^ 1;
+
+ DWORD disp = (S ? 0xff000000 : 0) | (I1 << 23) | (I2 << 22) | (imm10 << 12) | (imm11 << 1);
+
+ m_nextIP = (BYTE*)((GetReg(15) + disp) | THUMB_CODE);
+ m_skipIP =(BYTE*)(((DWORD)m_ip) + 4);
+ m_type = WALK_CALL;
+ return;
+ }
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO100000, "ArmWalker::Decode 16bit instruction at %x, opcode: %x\n", m_ip, (DWORD)opcode1));
+ // WALK_RETURN
+ if ((opcode1 & 0xfe00) == 0xbc00)
+ {
+ // POP : T1
+ DWORD P = BitExtract(opcode1, 8, 8);
+ DWORD registerList = (P << 15) | BitExtract(opcode1, 7, 0);
+
+ if (registerList & 0x8000)
+ {
+ m_type = WALK_RETURN;
+ return;
+ }
+ }
+
+ // WALK_BRANCH
+ else if (((opcode1 & 0xf000) == 0xd000) &&
+ ((opcode1 & 0x0f00) != 0x0e00) )
+ {
+ // B : T1
+ DWORD cond = BitExtract(opcode1, 11, 8);
+ DWORD imm8 = BitExtract(opcode1, 7, 0);
+
+ if (ConditionHolds(cond))
+ {
+ DWORD disp = (imm8 << 1) | ((imm8 & 0x80) ? 0xffffff00 : 0);
+
+ m_nextIP = (BYTE*)((GetReg(15) + disp) | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ }
+ else if ((opcode1 & 0xf800) == 0xe000)
+ {
+ // B : T2
+ DWORD imm11 = BitExtract(opcode1, 10, 0);
+
+ DWORD disp = (imm11 << 1) | ((imm11 & 0x400) ? 0xfffff000 : 0);
+
+ m_nextIP = (BYTE*)((GetReg(15) + disp) | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ else if ((opcode1 & 0xff87) == 0x4700)
+ {
+ // BX : T1
+ DWORD Rm = BitExtract(opcode1, 6, 3);
+
+ m_nextIP = (BYTE*)(GetReg(Rm) | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = (Rm != 14) ? WALK_BRANCH : WALK_RETURN;
+ return;
+ }
+ else if ((opcode1 & 0xff00) == 0x4600)
+ {
+ // MOV (register) : T1
+ DWORD D = BitExtract(opcode1, 7, 7);
+ DWORD Rm = BitExtract(opcode1, 6, 3);
+ DWORD Rd = (D << 3) | BitExtract(opcode1, 2, 0);
+
+ if (Rd == 15)
+ {
+ m_nextIP = (BYTE*)(GetReg(Rm) | THUMB_CODE);
+ m_skipIP = m_nextIP;
+ m_type = WALK_BRANCH;
+ return;
+ }
+ }
+
+ // WALK_CALL
+ else if ((opcode1 & 0xff87) == 0x4780)
+ {
+ // BLX (register) : T1
+ DWORD Rm = BitExtract(opcode1, 6, 3);
+
+ m_nextIP = (BYTE*)(GetReg(Rm) | THUMB_CODE);
+ m_skipIP = (BYTE*)(((DWORD)m_ip) + 2);
+ m_type = WALK_CALL;
+ return;
+ }
+ }
+}
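
The CPSR bit manipulation at the top of Decode() reconstructs the 8-bit Thumb ITSTATE field, which the architecture stores split across two CPSR ranges: ITSTATE[7:2] in CPSR[15:10] and ITSTATE[1:0] in CPSR[26:25]. The standalone sketch below restates that extraction with plain shifts; it is an illustration only and assumes nothing beyond those architectural bit positions.

// Standalone restatement of the ITSTATE reconstruction above (illustration only).
#include <cstdint>
#include <cstdio>

static uint8_t ItStateFromCpsr(uint32_t cpsr)
{
    uint8_t high = (uint8_t)((cpsr >> 10) & 0x3f);  // ITSTATE[7:2] from CPSR[15:10]
    uint8_t low  = (uint8_t)((cpsr >> 25) & 0x3);   // ITSTATE[1:0] from CPSR[26:25]
    return (uint8_t)((high << 2) | low);
}

int main()
{
    // All IT bits clear: not inside an IT block, so the walker decodes normally.
    printf("ITSTATE = 0x%02x\n", (unsigned)ItStateFromCpsr(0));
    return 0;
}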
+
+// Get the current value of a register. PC (register 15) is always reported as the current instruction PC + 4
+// as per the ARM architecture.
+DWORD NativeWalker::GetReg(DWORD reg)
+{
+ _ASSERTE(reg <= 15);
+
+ if (reg == 15)
+ return (m_registers->pCurrentContext->Pc + 4) & ~THUMB_CODE;
+
+ return (&m_registers->pCurrentContext->R0)[reg];
+}
+
+// Returns true if the current context indicates the ARM condition specified holds.
+bool NativeWalker::ConditionHolds(DWORD cond)
+{
+ // Bit numbers of the condition flags in the CPSR.
+ enum APSRBits
+ {
+ APSR_N = 31,
+ APSR_Z = 30,
+ APSR_C = 29,
+ APSR_V = 28,
+ };
+
+// Return true if the given condition (C, N, Z or V) holds in the current context.
+#define GET_FLAG(_flag) \
+ ((m_registers->pCurrentContext->Cpsr & (1 << APSR_##_flag)) != 0)
+
+ switch (cond)
+ {
+ case 0: // EQ (Z==1)
+ return GET_FLAG(Z);
+ case 1: // NE (Z==0)
+ return !GET_FLAG(Z);
+ case 2: // CS (C==1)
+ return GET_FLAG(C);
+ case 3: // CC (C==0)
+ return !GET_FLAG(C);
+ case 4: // MI (N==1)
+ return GET_FLAG(N);
+ case 5: // PL (N==0)
+ return !GET_FLAG(N);
+ case 6: // VS (V==1)
+ return GET_FLAG(V);
+ case 7: // VC (V==0)
+ return !GET_FLAG(V);
+ case 8: // HI (C==1 && Z==0)
+ return GET_FLAG(C) && !GET_FLAG(Z);
+ case 9: // LS (C==0 || Z==1)
+ return !GET_FLAG(C) || GET_FLAG(Z);
+ case 10: // GE (N==V)
+ return GET_FLAG(N) == GET_FLAG(V);
+ case 11: // LT (N!=V)
+ return GET_FLAG(N) != GET_FLAG(V);
+ case 12: // GT (Z==0 && N==V)
+ return !GET_FLAG(Z) && (GET_FLAG(N) == GET_FLAG(V));
+ case 13: // LE (Z==1 || N!=V)
+ return GET_FLAG(Z) || (GET_FLAG(N) != GET_FLAG(V));
+ case 14: // AL
+ return true;
+ case 15:
+ _ASSERTE(!"Unsupported condition code: 15");
+ return false;
+ default:
+ UNREACHABLE();
+ return false;
+ }
+}
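+// Minimal standalone sketch of the same flag test, assuming only a raw CPSR word
+// (hypothetical helper shown for illustration; the real code reads the flags from
+// m_registers->pCurrentContext via GET_FLAG above):
+//
+//   static bool SignedGreaterOrEqual(DWORD cpsr)
+//   {
+//       bool n = (cpsr & (1u << 31)) != 0;  // N flag
+//       bool v = (cpsr & (1u << 28)) != 0;  // V flag
+//       return n == v;                      // GE holds exactly when N == V
+//   }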
+
+#endif
diff --git a/src/debug/ee/arm/dbghelpers.asm b/src/debug/ee/arm/dbghelpers.asm
new file mode 100644
index 0000000000..d356de57eb
--- /dev/null
+++ b/src/debug/ee/arm/dbghelpers.asm
@@ -0,0 +1,91 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+#include "ksarm.h"
+#include "asmconstants.h"
+
+ IMPORT FuncEvalHijackWorker
+ IMPORT FuncEvalHijackPersonalityRoutine
+ IMPORT ExceptionHijackWorker
+ IMPORT ExceptionHijackPersonalityRoutine
+ EXPORT ExceptionHijackEnd
+
+ MACRO
+ CHECK_STACK_ALIGNMENT
+
+#ifdef _DEBUG
+ push {r0}
+ add r0, sp, #4
+ tst r0, #7
+ pop {r0}
+ beq %0
+ EMIT_BREAKPOINT
+0
+#endif
+ MEND
+
+ TEXTAREA
+
+;
+; hijacking stub used to perform a func-eval, see Debugger::FuncEvalSetup() for use.
+;
+; on entry:
+; r0 : pointer to DebuggerEval object
+;
+
+ NESTED_ENTRY FuncEvalHijack,,FuncEvalHijackPersonalityRoutine
+
+ ; NOTE: FuncEvalHijackPersonalityRoutine is dependent on the stack layout so if
+ ; you change the prolog you will also need to update the personality routine.
+
+ ; push arg to the stack so our personality routine can find it
+ ; push lr to get good stacktrace in debugger
+ PROLOG_PUSH {r0,lr}
+
+ CHECK_STACK_ALIGNMENT
+
+ ; FuncEvalHijackWorker returns the address we should jump to.
+ bl FuncEvalHijackWorker
+
+ ; effective NOP to terminate unwind
+ mov r2, r2
+
+ EPILOG_STACK_FREE 8
+ EPILOG_BRANCH_REG r0
+
+ NESTED_END FuncEvalHijack
+
+;
+; This is the general purpose hijacking stub. DacDbiInterfaceImpl::Hijack() will
+; set the registers with the appropriate parameters from out-of-process.
+;
+; on entry:
+; r0 : pointer to CONTEXT
+; r1 : pointer to EXCEPTION_RECORD
+; r2 : EHijackReason
+; r3 : void* pdata
+;
+
+ NESTED_ENTRY ExceptionHijack,,ExceptionHijackPersonalityRoutine
+
+ CHECK_STACK_ALIGNMENT
+
+ ; make the call
+ bl ExceptionHijackWorker
+
+ ; effective NOP to terminate unwind
+ mov r3, r3
+
+ ; *** should never get here ***
+ EMIT_BREAKPOINT
+
+; exported label so the debugger knows where the end of this function is
+ExceptionHijackEnd
+ NESTED_END
+
+
+ ; must be at end of file
+ END
+
diff --git a/src/debug/ee/arm/primitives.cpp b/src/debug/ee/arm/primitives.cpp
new file mode 100644
index 0000000000..6a775779c3
--- /dev/null
+++ b/src/debug/ee/arm/primitives.cpp
@@ -0,0 +1,38 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+#include "stdafx.h"
+#include "threads.h"
+#include "../../shared/arm/primitives.cpp"
+
+void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc)
+{
+ CONTEXT tmp;
+ CopyRegDisplay(pSrc, pDst, &tmp);
+}
+
+void SetSSFlag(DT_CONTEXT *, Thread *pThread)
+{
+ _ASSERTE(pThread != NULL);
+
+ pThread->EnableSingleStep();
+}
+
+void UnsetSSFlag(DT_CONTEXT *, Thread *pThread)
+{
+ _ASSERTE(pThread != NULL);
+
+ pThread->DisableSingleStep();
+}
+
+// Check if single stepping is enabled.
+bool IsSSFlagEnabled(DT_CONTEXT *, Thread *pThread)
+{
+ _ASSERTE(pThread != NULL);
+
+ return pThread->IsSingleStepEnabled();
+}
diff --git a/src/debug/ee/arm64/.gitmirror b/src/debug/ee/arm64/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/debug/ee/arm64/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/debug/ee/arm64/arm64walker.cpp b/src/debug/ee/arm64/arm64walker.cpp
new file mode 100644
index 0000000000..d981b0c064
--- /dev/null
+++ b/src/debug/ee/arm64/arm64walker.cpp
@@ -0,0 +1,23 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: Arm64walker.cpp
+//
+
+//
+// ARM64 instruction decoding/stepping logic
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+
+#include "walker.h"
+
+#include "frames.h"
+#include "openum.h"
+
+#ifdef _TARGET_ARM64_
+
+#endif
diff --git a/src/debug/ee/arm64/primitives.cpp b/src/debug/ee/arm64/primitives.cpp
new file mode 100644
index 0000000000..86d6159164
--- /dev/null
+++ b/src/debug/ee/arm64/primitives.cpp
@@ -0,0 +1,16 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+#include "stdafx.h"
+#include "threads.h"
+#include "../../shared/arm64/primitives.cpp"
+
+void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc)
+{
+ CONTEXT tmp;
+ CopyRegDisplay(pSrc, pDst, &tmp);
+}
diff --git a/src/debug/ee/canary.cpp b/src/debug/ee/canary.cpp
new file mode 100644
index 0000000000..cab5c94ef6
--- /dev/null
+++ b/src/debug/ee/canary.cpp
@@ -0,0 +1,325 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: Canary.cpp
+//
+
+//
+// Canary for debugger helper thread. This will sniff out if it's safe to take locks.
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+
+
+//-----------------------------------------------------------------------------
+// Ctor for HelperCanary class
+//-----------------------------------------------------------------------------
+HelperCanary::HelperCanary()
+{
+ m_hCanaryThread = NULL;
+ m_CanaryThreadId = 0;
+ m_RequestCounter = 0;
+ m_AnswerCounter = 0;
+ m_fStop = false;
+
+ m_fCachedValid = false;
+ m_fCachedAnswer = false;
+ m_initialized = false;
+}
+
+//-----------------------------------------------------------------------------
+// Dtor for class
+//-----------------------------------------------------------------------------
+HelperCanary::~HelperCanary()
+{
+ // Since we're deleting this memory, we need to kill the canary thread.
+ m_fStop = true;
+ SetEvent(m_hPingEvent);
+
+ // m_hPingEvent dtor will close handle
+ WaitForSingleObject(m_hCanaryThread, INFINITE);
+}
+
+//-----------------------------------------------------------------------------
+// Clear the cached value for AreLocksAvailable();
+//-----------------------------------------------------------------------------
+void HelperCanary::ClearCache()
+{
+ _ASSERTE(ThisIsHelperThreadWorker());
+ m_fCachedValid = false;
+}
+
+//-----------------------------------------------------------------------------
+// The helper thread can call this to determine if it can safely take a certain
+// set of locks (mainly the heap lock(s)). The canary thread will go off and
+// try and take these and report back to the helper w/o ever blocking the
+// helper.
+//
+// Returns 'true' if it's safe for helper to take locks; else false.
+// We err on the side of safety (returning false).
+//-----------------------------------------------------------------------------
+bool HelperCanary::AreLocksAvailable()
+{
+ // If we're not on the helper thread, then we're guaranteed safe.
+ // We check this to support MaybeHelperThread code.
+ if (!ThisIsHelperThreadWorker())
+ {
+ return true;
+ }
+
+ if (m_fCachedValid)
+ {
+ return m_fCachedAnswer;
+ }
+
+ // Cache the answer.
+ m_fCachedAnswer = AreLocksAvailableWorker();
+ m_fCachedValid = true;
+
+#ifdef _DEBUG
+ // For managed-only debugging, we should always be safe.
+ if (!g_pRCThread->GetDCB()->m_rightSideIsWin32Debugger)
+ {
+ _ASSERTE(m_fCachedAnswer || !"Canary returned false in Managed-debugger");
+ }
+
+ // For debug, nice to be able to enable an assert that tells us if this situation is actually happening.
+ if (!m_fCachedAnswer)
+ {
+ static BOOL shouldBreak = -1;
+ if (shouldBreak == -1)
+ {
+ shouldBreak = UnsafeGetConfigDWORD(CLRConfig::INTERNAL_DbgBreakIfLocksUnavailable);
+ }
+ if (shouldBreak)
+ {
+ _ASSERTE(!"Potential deadlock detected.\nLocks that the helper thread may need are currently held by other threads.");
+ }
+ }
+#endif // _DEBUG
+
+ return m_fCachedAnswer;
+}
+
+//-----------------------------------------------------------------------------
+// Creates the canary thread and signaling events.
+//-----------------------------------------------------------------------------
+void HelperCanary::Init()
+{
+ // You can only run the init code once. The debugger attempts to lazy-init
+ // the canary at several points but if the canary is already inited then
+ // we just eagerly return. See issue 841005 for more details.
+ if(m_initialized)
+ {
+ return;
+ }
+ else
+ {
+ m_initialized = true;
+ }
+
+ m_hPingEvent = WszCreateEvent(NULL, (BOOL) kAutoResetEvent, FALSE, NULL);
+ if (m_hPingEvent == NULL)
+ {
+ STRESS_LOG1(LF_CORDB, LL_ALWAYS, "Canary failed to create ping event. gle=%d\n", GetLastError());
+ // in the past if we failed to start the thread we just assumed it was unsafe
+ // so I am preserving that behavior. However I am going to assert that this
+ // doesn't really happen
+ _ASSERTE(!"Canary failed to create ping event");
+ return;
+ }
+
+ m_hWaitEvent = WszCreateEvent(NULL, (BOOL) kManualResetEvent, FALSE, NULL);
+ if (m_hWaitEvent == NULL)
+ {
+ STRESS_LOG1(LF_CORDB, LL_ALWAYS, "Canary failed to create wait event. gle=%d\n", GetLastError());
+ // in the past if we failed to start the thread we just assumed it was unsafe
+ // so I am preserving that behavior. However I am going to assert that this
+ // doesn't really happen
+ _ASSERTE(!"Canary failed to create wait event");
+ return;
+ }
+
+ // Spin up the canary. This will call dllmain, but that's ok because it just
+ // degenerates to our timeout case.
+ const DWORD flags = CREATE_SUSPENDED;
+ m_hCanaryThread = CreateThread(NULL, 0,
+ HelperCanary::ThreadProc, this,
+ flags, &m_CanaryThreadId);
+
+ // in the past if we failed to start the thread we just assumed it was unsafe
+ // so I am preserving that behavior. However I am going to assert that this
+ // doesn't really happen
+ if(m_hCanaryThread == NULL)
+ {
+ _ASSERTE(!"CreateThread() failed to create Canary thread");
+ return;
+ }
+
+ // Capture the Canary thread's TID so that the RS can mark it as a can't-stop region.
+ // This is essential so that the RS doesn't view it as some external thread to be suspended when we hit
+ // debug events.
+ _ASSERTE(g_pRCThread != NULL);
+ g_pRCThread->GetDCB()->m_CanaryThreadId = m_CanaryThreadId;
+
+ ResumeThread(m_hCanaryThread);
+}
+
+
+//-----------------------------------------------------------------------------
+// Does real work for AreLocksAvailable(), minus caching.
+//-----------------------------------------------------------------------------
+bool HelperCanary::AreLocksAvailableWorker()
+{
+#if _DEBUG
+ // For debugging, allow a way to force the canary to fail, and thus test our
+ // failure paths.
+    static BOOL fShortcut = -1;
+ if (fShortcut == -1)
+ {
+ fShortcut = UnsafeGetConfigDWORD(CLRConfig::INTERNAL_DbgShortcutCanary);
+ }
+ if (fShortcut == 1)
+ {
+ return false;
+ }
+ if (fShortcut == 2)
+ {
+ return true;
+ }
+#endif
+
+ // We used to do lazy init but that is dangerous... CreateThread
+ // allocates some memory which can block on a lock, exactly the
+ // situation we are attempting to detect and not block on.
+ // Instead we spin up the canary in advance and if that failed then
+ // assume unsafe
+ if(m_CanaryThreadId == 0)
+ {
+ _ASSERTE(!"We shouldn't be lazy initing the canary anymore");
+ return false;
+ }
+
+ // Canary will take the locks of interest and then set the Answer counter equal to our request counter.
+ m_RequestCounter = m_RequestCounter + 1;
+ ResetEvent(m_hWaitEvent);
+ SetEvent(m_hPingEvent);
+
+ // Spin waiting for answer. If canary gets back to us, then the locks must be free and so it's safe for helper-thread.
+ // If we timeout, then we err on the side of safety and assume canary blocked on a lock and so it's not safe
+ // for the helper thread to take those locks.
+ // We explicitly have a simple spin-wait instead of using win32 events because we want something simple and
+ // provably correct. Since we already need the spin-wait for the counters, adding an extra win32 event
+ // to get rid of the sleep would be additional complexity and race windows without a clear benefit.
+
+    // We need to track what iteration of "AreLocksAvailable" the helper is on. Say canary sniffs two locks, now imagine if:
+ // 1) Helper calls AreLocksAvailable,
+ // 2) the canary does get blocked on lock #1,
+ // 3) process resumes, canary now gets + releases lock #1,
+ // 4) another random thread takes lock #1
+ // 5) then helper calls AreLocksAvailable again later
+ // 6) then the canary finally finishes. Note it's never tested lock #1 on the 2nd iteration.
+ // We don't want the canary's response initiated from the 1st request to impact the Helper's 2nd request.
+ // Thus we keep a request / answer counter to make sure that the canary tests all locks on the same iteration.
+ DWORD retry = 0;
+
+ const DWORD msSleepSteadyState = 150; // sleep time in ms
+ const DWORD maxRetry = 15; // number of times to try.
+ DWORD msSleep = 80; // how much to sleep on first iteration.
+
+ while(m_RequestCounter != m_AnswerCounter)
+ {
+ retry ++;
+ if (retry > maxRetry)
+ {
+ STRESS_LOG0(LF_CORDB, LL_ALWAYS, "Canary timed out!\n");
+ return false;
+ }
+
+        // We'll either time out (in which case it's like a Sleep()), or
+        // get the event, which shortcuts the sleep.
+ WaitForSingleObject(m_hWaitEvent, msSleep);
+
+ // In case a stale answer sets the wait event high, reset it now to avoid us doing
+ // a live spin-lock.
+ ResetEvent(m_hWaitEvent);
+
+
+ msSleep = msSleepSteadyState;
+ }
+
+ // Canary made it on same Request iteration, so it must be safe!
+ return true;
+}
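+// The request/answer handshake above reduces to the following pattern (a sketch;
+// the member names match the ones used in this file):
+//
+//   helper thread                            canary thread
+//   -------------                            -------------
+//   m_RequestCounter++;                      WaitForSingleObject(m_hPingEvent, ...);
+//   SetEvent(m_hPingEvent);                  TakeLocks();               // may block
+//   poll with timeout until                  m_AnswerCounter = request; // echo back
+//       m_AnswerCounter == m_RequestCounter  SetEvent(m_hWaitEvent);
+//
+// Equality within the timeout means the canary got through the locks, so the helper
+// may take them; a timeout is treated as "not safe".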
+
+//-----------------------------------------------------------------------------
+// Real OS thread proc for Canary thread.
+// param - 'this' pointer for HelperCanary
+// return value - meaningless, but threads need to return something.
+//-----------------------------------------------------------------------------
+DWORD HelperCanary::ThreadProc(LPVOID param)
+{
+ _ASSERTE(!ThisIsHelperThreadWorker());
+
+ STRESS_LOG0(LF_CORDB, LL_ALWAYS, "Canary thread spun up\n");
+ HelperCanary * pThis = reinterpret_cast<HelperCanary*> (param);
+ pThis->ThreadProc();
+ _ASSERTE(pThis->m_fStop);
+ STRESS_LOG0(LF_CORDB, LL_ALWAYS, "Canary thread exiting\n");
+
+ return 0;
+}
+
+//-----------------------------------------------------------------------------
+// Real implementation of Canary Thread.
+// Single canary thread is reused after creation.
+//-----------------------------------------------------------------------------
+void HelperCanary::ThreadProc()
+{
+ _ASSERTE(m_CanaryThreadId == GetCurrentThreadId());
+
+ while(true)
+ {
+ WaitForSingleObject(m_hPingEvent, INFINITE);
+
+ m_AnswerCounter = 0;
+ DWORD dwRequest = m_RequestCounter;
+
+ if (m_fStop)
+ {
+ return;
+ }
+ STRESS_LOG2(LF_CORDB, LL_ALWAYS, "stage:%d,req:%d", 0, dwRequest);
+
+ // Now take the locks of interest. This could block indefinitely. If this blocks, we may even get multiple requests.
+ TakeLocks();
+
+ m_AnswerCounter = dwRequest;
+
+ // Set wait event to let Requesting thread shortcut its spin lock. This is purely an
+ // optimization because requesting thread will still check Answer/Request counters.
+        // That protects us from recycling bugs.
+ SetEvent(m_hWaitEvent);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Try and take locks.
+//-----------------------------------------------------------------------------
+void HelperCanary::TakeLocks()
+{
+ _ASSERTE(::GetThread() == NULL); // Canary Thread should always be outside the runtime.
+ _ASSERTE(m_CanaryThreadId == GetCurrentThreadId());
+
+ // Call new, which will take whatever standard heap locks there are.
+ // We don't care about what memory we get; we just want to take the heap lock(s).
+ DWORD * p = new (nothrow) DWORD();
+ delete p;
+
+ STRESS_LOG1(LF_CORDB, LL_ALWAYS, "canary stage:%d\n", 1);
+}
+
+
diff --git a/src/debug/ee/canary.h b/src/debug/ee/canary.h
new file mode 100644
index 0000000000..b14f5a48d1
--- /dev/null
+++ b/src/debug/ee/canary.h
@@ -0,0 +1,81 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: Canary.h
+//
+
+//
+// Header file Debugger Canary
+//
+//*****************************************************************************
+
+#ifndef CANARY_H
+#define CANARY_H
+
+//-----------------------------------------------------------------------------
+// Canary.
+//
+// The helper thread needs to be very careful about what locks it takes. If it takes a lock
+// held by a suspended thread, then the whole process deadlocks (Since the suspended thread
+// is waiting for the helper to resume it).
+// In general, we try to avoid having the helper take such locks, but the problem is unsolvable
+// because:
+// - we don't know what that set of locks are (eg, OS apis may take new locks between versions)
+// - the helper may call into the EE and that takes unsafe locks.
+// The most prominent dangerous lock is the heap lock, which is why we have the "InteropSafe" heap.
+// Since we don't even know what locks are bad (eg, we can't actually find the Heaplock), we can't
+// explicitly check if the lock is safe to take.
+// So we spin up an auxiliary "Canary" thread which can sniff for locks that the helper thread will
+// need to take. Thus the helper thread can find out if the locks are available without actually taking them.
+// The "Canary" can call APIs that take the locks (such as regular "new" for the process heap lock).
+// The helper will wait on the canary with timeout. If the canary returns, the helper knows it's
+// safe to take the locks. If the canary times out, then the helper assumes it's blocked on the
+// locks and thus not safe for the helper to take them.
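+//
+// Typical use from the helper thread looks roughly like this (a sketch; the methods
+// named are the public interface of the class declared below):
+//
+//   canary.Init();                   // spin up the canary thread ahead of time
+//   ...
+//   if (canary.AreLocksAvailable())  // ping the canary and wait with a timeout
+//   {
+//       // safe to allocate from the process heap on the helper thread
+//   }
+//   canary.ClearCache();             // drop the cached answer between stops
+//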
+//-----------------------------------------------------------------------------
+class HelperCanary
+{
+public:
+ HelperCanary();
+ ~HelperCanary();
+
+ void Init();
+ bool AreLocksAvailable();
+ void ClearCache();
+
+protected:
+ static DWORD WINAPI ThreadProc(LPVOID param);
+ void ThreadProc();
+ void TakeLocks();
+ bool AreLocksAvailableWorker();
+
+ // Flag to tell Canary thread to exit.
+ bool m_fStop;
+
+ // Flag to indicate Init has been run
+ bool m_initialized;
+
+ // Cache the answers between stops so that we don't have to ping the canary every time.
+ bool m_fCachedValid;
+ bool m_fCachedAnswer;
+
+ HANDLE m_hCanaryThread; // handle for canary thread
+ DWORD m_CanaryThreadId; // canary thread OS Thread ID
+
+ // These counters are read + written by both helper and canary thread.
+ // These need to be volatile because of how they're being accessed from different threads.
+ // However, since each is only read from 1 thread, and written by another, and the WFSO/SetEvent
+ // will give us a memory barrier, and we have a flexible polling operation, volatile is
+ // sufficient to deal with memory barrier issues.
+ Volatile<DWORD> m_RequestCounter;
+ Volatile<DWORD> m_AnswerCounter;
+ HandleHolder m_hPingEvent;
+
+ // We use a Manual wait event to replace Sleep.
+ HandleHolder m_hWaitEvent;
+};
+
+
+#endif // CANARY_H
+
diff --git a/src/debug/ee/controller.cpp b/src/debug/ee/controller.cpp
new file mode 100644
index 0000000000..9a990dec81
--- /dev/null
+++ b/src/debug/ee/controller.cpp
@@ -0,0 +1,8819 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// ==++==
+//
+
+//
+// ==--==
+// ****************************************************************************
+// File: controller.cpp
+//
+
+//
+// controller.cpp: Debugger execution control routines
+//
+// ****************************************************************************
+// Putting code & #includes, #defines, etc, before the stdafx.h will
+// cause the code,etc, to be silently ignored
+#include "stdafx.h"
+#include "openum.h"
+#include "../inc/common.h"
+#include "eeconfig.h"
+
+#include "../../vm/methoditer.h"
+
+const char *GetTType( TraceType tt);
+
+#define IsSingleStep(exception) (exception == EXCEPTION_SINGLE_STEP)
+
+
+
+
+
+// -------------------------------------------------------------------------
+// DebuggerController routines
+// -------------------------------------------------------------------------
+
+SPTR_IMPL_INIT(DebuggerPatchTable, DebuggerController, g_patches, NULL);
+SVAL_IMPL_INIT(BOOL, DebuggerController, g_patchTableValid, FALSE);
+
+#if !defined(DACCESS_COMPILE)
+
+DebuggerController *DebuggerController::g_controllers = NULL;
+DebuggerControllerPage *DebuggerController::g_protections = NULL;
+CrstStatic DebuggerController::g_criticalSection;
+int DebuggerController::g_cTotalMethodEnter = 0;
+
+
+// Is this patch at a position at which it's safe to take a stack trace?
+bool DebuggerControllerPatch::IsSafeForStackTrace()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ TraceType tt = this->trace.GetTraceType();
+ Module *module = this->key.module;
+ BOOL managed = this->IsManagedPatch();
+
+ // Patches placed by MgrPush can come at lots of illegal spots. Can't take a stack trace here.
+ if ((module == NULL) && managed && (tt == TRACE_MGR_PUSH))
+ {
+ return false;
+ }
+
+ // Consider everything else legal.
+ // This is a little shady for TRACE_FRAME_PUSH. But TraceFrame() needs a stackInfo
+ // to get a RegDisplay (though almost nobody uses it, so perhaps it could be removed).
+ return true;
+
+}
+
+#ifndef _TARGET_ARM_
+// returns a pointer to the shared buffer. each call will AddRef() the object
+// before returning it so callers only need to Release() when they're finished with it.
+SharedPatchBypassBuffer* DebuggerControllerPatch::GetOrCreateSharedPatchBypassBuffer()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_pSharedPatchBypassBuffer == NULL)
+ {
+ m_pSharedPatchBypassBuffer = new (interopsafeEXEC) SharedPatchBypassBuffer();
+ _ASSERTE(m_pSharedPatchBypassBuffer);
+ TRACE_ALLOC(m_pSharedPatchBypassBuffer);
+ }
+
+ m_pSharedPatchBypassBuffer->AddRef();
+
+ return m_pSharedPatchBypassBuffer;
+}
+#endif // _TARGET_ARM_
+
+// @todo - remove all this splicing trash
+// This Sort/Splice stuff just reorders the patches within a particular chain such
+// that when we iterate through by calling GetPatch() and GetNextPatch(DebuggerControllerPatch),
+// we'll get patches in increasing order of DebuggerControllerTypes.
+// Practically, this means that calling GetPatch() will return EnC patches before stepping patches.
+//
+#if 1
+void DebuggerPatchTable::SortPatchIntoPatchList(DebuggerControllerPatch **ppPatch)
+{
+ LOG((LF_CORDB, LL_EVERYTHING, "DPT::SPIPL called.\n"));
+#ifdef _DEBUG
+ DebuggerControllerPatch *patchFirst
+ = (DebuggerControllerPatch *) Find(Hash((*ppPatch)), Key((*ppPatch)));
+ _ASSERTE(patchFirst == (*ppPatch));
+ _ASSERTE((*ppPatch)->controller->GetDCType() != DEBUGGER_CONTROLLER_STATIC);
+#endif //_DEBUG
+ DebuggerControllerPatch *patchNext = GetNextPatch((*ppPatch));
+    LOG((LF_CORDB, LL_EVERYTHING, "DPT::SPIPL GetNextPatch passed\n"));
+    // List contains one (sorted) element
+ if (patchNext == NULL)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DPT::SPIPL: Patch 0x%x is a sorted singleton\n", (*ppPatch)));
+ return;
+ }
+
+ // If we decide to reorder the list, we'll need to keep the element
+    // indexed by the hash function as the (sorted) first item. Everything else
+    // chains off this element, and can thus stay put.
+ // Thus, either the element we just added is already sorted, or else we'll
+ // have to move it elsewhere in the list, meaning that we'll have to swap
+ // the second item & the new item, so that the index points to the proper
+ // first item in the list.
+
+ //use Cur ptr for case where patch gets appended to list
+ DebuggerControllerPatch *patchCur = patchNext;
+
+ while (patchNext != NULL &&
+ ((*ppPatch)->controller->GetDCType() >
+ patchNext->controller->GetDCType()) )
+ {
+ patchCur = patchNext;
+ patchNext = GetNextPatch(patchNext);
+ }
+
+ if (patchNext == GetNextPatch((*ppPatch)))
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DPT::SPIPL: Patch 0x%x is already sorted\n", (*ppPatch)));
+ return; //already sorted
+ }
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DPT::SPIPL: Patch 0x%x will be moved \n", (*ppPatch)));
+
+ //remove it from the list
+ SpliceOutOfList((*ppPatch));
+
+ // the kinda neat thing is: since we put it originally at the front of the list,
+ // and it's not in order, then it must be behind another element of this list,
+ // so we don't have to write any 'SpliceInFrontOf' code.
+
+ _ASSERTE(patchCur != NULL);
+ SpliceInBackOf((*ppPatch), patchCur);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DPT::SPIPL: Patch 0x%x is now sorted\n", (*ppPatch)));
+}
+
+// This can leave the list empty, so don't do this unless you put
+// the patch back somewhere else.
+void DebuggerPatchTable::SpliceOutOfList(DebuggerControllerPatch *patch)
+{
+ // We need to get iHash, the index of the ptr within
+ // m_piBuckets, ie it's entry in the hashtable.
+ ULONG iHash = Hash(patch) % m_iBuckets;
+ ULONG iElement = m_piBuckets[iHash];
+ DebuggerControllerPatch *patchFirst
+ = (DebuggerControllerPatch *) EntryPtr(iElement);
+
+ // Fix up pointers to chain
+ if (patchFirst == patch)
+ {
+ // The first patch shouldn't have anything behind it.
+ _ASSERTE(patch->entry.iPrev == DPT_INVALID_SLOT);
+
+ if (patch->entry.iNext != DPT_INVALID_SLOT)
+ {
+ m_piBuckets[iHash] = patch->entry.iNext;
+ }
+ else
+ {
+ m_piBuckets[iHash] = DPT_INVALID_SLOT;
+ }
+ }
+
+ if (patch->entry.iNext != DPT_INVALID_SLOT)
+ {
+ EntryPtr(patch->entry.iNext)->iPrev = patch->entry.iPrev;
+ }
+
+ if (patch->entry.iPrev != DPT_INVALID_SLOT)
+ {
+        EntryPtr(patch->entry.iPrev)->iNext = patch->entry.iNext;
+ }
+
+ patch->entry.iNext = DPT_INVALID_SLOT;
+ patch->entry.iPrev = DPT_INVALID_SLOT;
+}
+
+void DebuggerPatchTable::SpliceInBackOf(DebuggerControllerPatch *patchAppend,
+ DebuggerControllerPatch *patchEnd)
+{
+ ULONG iAppend = ItemIndex((HASHENTRY*)patchAppend);
+ ULONG iEnd = ItemIndex((HASHENTRY*)patchEnd);
+
+ patchAppend->entry.iPrev = iEnd;
+ patchAppend->entry.iNext = patchEnd->entry.iNext;
+
+ if (patchAppend->entry.iNext != DPT_INVALID_SLOT)
+ EntryPtr(patchAppend->entry.iNext)->iPrev = iAppend;
+
+ patchEnd->entry.iNext = iAppend;
+}
+#endif
+
+//-----------------------------------------------------------------------------
+// Stack safety rules.
+// In general, we're safe to crawl whenever we're in preemptive mode.
+// We must also be safe at any spot the thread could get synchronized,
+// because that means that the thread will be stopped to let the debugger shell
+// inspect it and that can definitely take stack traces.
+// Basically the only unsafe spot is in the middle of a goofy stub with some
+// partially constructed frame while in coop mode.
+//-----------------------------------------------------------------------------
+
+// Safe if we're at certain types of patches.
+// See Patch::IsSafeForStackTrace for details.
+StackTraceTicket::StackTraceTicket(DebuggerControllerPatch * patch)
+{
+ _ASSERTE(patch != NULL);
+ _ASSERTE(patch->IsSafeForStackTrace());
+}
+
+// Safe if there was already another stack trace at this spot. (Grandfather clause)
+// This is commonly used for StepOut, which runs stacktraces to crawl up
+// the stack to find a place to patch.
+StackTraceTicket::StackTraceTicket(ControllerStackInfo * info)
+{
+ _ASSERTE(info != NULL);
+
+ // Ensure that the other stack info object actually executed (and thus was
+ // actually valid).
+ _ASSERTE(info->m_dbgExecuted);
+}
+
+// Safe b/c the context shows we're in native managed code.
+// This must be safe because we could always set a managed breakpoint by native
+// offset and thus synchronize the shell at this spot. So this is
+// a specific example of the Synchronized case. The fact that we don't actually
+// synchronize doesn't make us any less safe.
+StackTraceTicket::StackTraceTicket(const BYTE * ip)
+{
+ _ASSERTE(g_pEEInterface->IsManagedNativeCode(ip));
+}
+
+// Safe if we're at a Synchronized point.
+StackTraceTicket::StackTraceTicket(Thread * pThread)
+{
+ _ASSERTE(pThread != NULL);
+
+ // If we're synchronized, the debugger should be stopped.
+ // That means all threads are synced and must be safe to take a stacktrace.
+ // Thus we don't even need to do a thread-specific check.
+ _ASSERTE(g_pDebugger->IsStopped());
+}
+
+// DebuggerUserBreakpoint has a special case of safety. See that ctor for details.
+StackTraceTicket::StackTraceTicket(DebuggerUserBreakpoint * p)
+{
+ _ASSERTE(p != NULL);
+}
+
+//void ControllerStackInfo::GetStackInfo(): GetStackInfo
+// is invoked by the user to trigger the stack walk. This will
+// cause the stack walk detailed in the class description to happen.
+// Thread* thread: The thread to do the stack walk on.
+// void* targetFP: Can be either NULL (meaning that the bottommost
+// frame is the target), or an frame pointer, meaning that the
+// caller wants information about a specific frame.
+// CONTEXT* pContext: A pointer to a CONTEXT structure. Can be null,
+// we use our temp context.
+// bool suppressUMChainFromComPlusMethodFrameGeneric - A ridiculous flag that is trying to narrowly
+// target a fix for issue 650903.
+// StackTraceTicket - ticket to ensure that we actually have permission for this stacktrace
+void ControllerStackInfo::GetStackInfo(
+ StackTraceTicket ticket,
+ Thread *thread,
+ FramePointer targetFP,
+ CONTEXT *pContext,
+ bool suppressUMChainFromComPlusMethodFrameGeneric
+ )
+{
+ _ASSERTE(thread != NULL);
+
+ BOOL contextValid = (pContext != NULL);
+ if (!contextValid)
+ {
+ // We're assuming the thread is protected w/ a frame (which includes the redirection
+ // case). The stackwalker will use that protection to prime the context.
+ pContext = &this->m_tempContext;
+ }
+ else
+ {
+ // If we provided an explicit context for this thread, it better not be redirected.
+ _ASSERTE(!ISREDIRECTEDTHREAD(thread));
+ }
+
+ // Mark this stackwalk as valid so that it can in turn be used to grandfather
+ // in other stackwalks.
+ INDEBUG(m_dbgExecuted = true);
+
+ m_activeFound = false;
+ m_returnFound = false;
+ m_bottomFP = LEAF_MOST_FRAME;
+ m_targetFP = targetFP;
+ m_targetFrameFound = (m_targetFP == LEAF_MOST_FRAME);
+ m_specialChainReason = CHAIN_NONE;
+ m_suppressUMChainFromComPlusMethodFrameGeneric = suppressUMChainFromComPlusMethodFrameGeneric;
+
+ int result = DebuggerWalkStack(thread,
+ LEAF_MOST_FRAME,
+ pContext,
+ contextValid,
+ WalkStack,
+ (void *) this,
+ FALSE);
+
+ _ASSERTE(m_activeFound); // All threads have at least one unmanaged frame
+
+ if (result == SWA_DONE)
+ {
+ _ASSERTE(!m_returnFound);
+ m_returnFrame = m_activeFrame;
+ }
+}
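+// A typical call site looks roughly like this (a sketch, not a quote from this file;
+// "patch" and "pThread" stand for whatever the caller has in hand, and LEAF_MOST_FRAME
+// asks for the bottommost frame):
+//
+//   ControllerStackInfo csi;
+//   StackTraceTicket ticket(patch);   // one of the ticket ctors defined above
+//   csi.GetStackInfo(ticket, pThread, LEAF_MOST_FRAME, NULL, false);
+//   if (csi.m_returnFound)
+//   {
+//       // csi.m_activeFrame is the leaf-most interesting frame and
+//       // csi.m_returnFrame describes its caller.
+//   }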
+
+//---------------------------------------------------------------------------------------
+//
+// This function "undoes" an unwind, i.e. it takes the active frame (the current frame)
+// and sets it to be the return frame (the caller frame). Currently it is only used by
+// the stepper to step out of an LCG method. See DebuggerStepper::DetectHandleLCGMethods()
+// for more information.
+//
+// Assumptions:
+// The current frame is valid on entry.
+//
+// Notes:
+// After this function returns, the active frame on this instance of ControllerStackInfo will no longer be valid.
+//
+// This function is specifically for DebuggerStepper::DetectHandleLCGMethods(). Using it in other scenarios may
+// require additional changes.
+//
+
+void ControllerStackInfo::SetReturnFrameWithActiveFrame()
+{
+ // Copy the active frame into the return frame.
+ m_returnFound = true;
+ m_returnFrame = m_activeFrame;
+
+ // Invalidate the active frame.
+ m_activeFound = false;
+ memset(&(m_activeFrame), 0, sizeof(m_activeFrame));
+ m_activeFrame.fp = LEAF_MOST_FRAME;
+}
+
+// Fill in a controller-stack info.
+StackWalkAction ControllerStackInfo::WalkStack(FrameInfo *pInfo, void *data)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!pInfo->HasStubFrame()); // we didn't ask for stub frames.
+
+ ControllerStackInfo *i = (ControllerStackInfo *) data;
+
+ //save this info away for later use
+ if (i->m_bottomFP == LEAF_MOST_FRAME)
+ i->m_bottomFP = pInfo->fp;
+
+    // This is part of the targeted fix for issue 650903. (See the other
+    // parts in code:TrackUMChain and code:DebuggerStepper::TrapStepOut.)
+ // pInfo->fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric has been
+ // set by TrackUMChain to help us remember that the current frame we're looking at is
+ // ComPlusMethodFrameGeneric (we can't rely on looking at pInfo->frame to check
+ // this), and i->m_suppressUMChainFromComPlusMethodFrameGeneric has been set by the
+ // dude initiating this walk to remind us that our goal in life is to do a Step Out
+ // during managed-only debugging. These two things together tell us we should ignore
+ // this frame, rather than erroneously identifying it as the target frame.
+#ifdef FEATURE_COMINTEROP
+ if(i->m_suppressUMChainFromComPlusMethodFrameGeneric &&
+ (pInfo->chainReason == CHAIN_ENTER_UNMANAGED) &&
+ (pInfo->fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric))
+ {
+ return SWA_CONTINUE;
+ }
+#endif // FEATURE_COMINTEROP
+
+ //have we reached the correct frame yet?
+ if (!i->m_targetFrameFound &&
+ IsEqualOrCloserToLeaf(i->m_targetFP, pInfo->fp))
+ {
+ i->m_targetFrameFound = true;
+ }
+
+ if (i->m_targetFrameFound )
+ {
+ // Ignore Enter-managed chains.
+ if (pInfo->chainReason == CHAIN_ENTER_MANAGED)
+ {
+ return SWA_CONTINUE;
+ }
+
+ if (i->m_activeFound )
+ {
+ // We care if the current frame is unmanaged (in case a managed stepper is initiated
+ // on a thread currently in unmanaged code). But since we can't step-out to UM frames,
+ // we can just skip them in the stack walk.
+ if (!pInfo->managed)
+ {
+ return SWA_CONTINUE;
+ }
+
+ if (pInfo->chainReason == CHAIN_CLASS_INIT)
+ i->m_specialChainReason = pInfo->chainReason;
+
+ if (pInfo->fp != i->m_activeFrame.fp) // avoid dups
+ {
+ i->m_returnFrame = *pInfo;
+
+#if defined(WIN64EXCEPTIONS)
+ CopyREGDISPLAY(&(i->m_returnFrame.registers), &(pInfo->registers));
+#endif // WIN64EXCEPTIONS
+
+ i->m_returnFound = true;
+
+ return SWA_ABORT;
+ }
+ }
+ else
+ {
+ i->m_activeFrame = *pInfo;
+
+#if defined(WIN64EXCEPTIONS)
+ CopyREGDISPLAY(&(i->m_activeFrame.registers), &(pInfo->registers));
+#endif // WIN64EXCEPTIONS
+
+ i->m_activeFound = true;
+
+ return SWA_CONTINUE;
+ }
+ }
+
+ return SWA_CONTINUE;
+}
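+// In effect the callback above implements this small state machine (a sketch of the
+// logic, not code from this file):
+//
+//   for each frame, walking leaf to root:
+//       ignore frames until fp reaches m_targetFP
+//       first frame seen after that (enter-managed chains excluded) -> m_activeFrame
+//       next distinct managed frame                                 -> m_returnFrame, stop
+//
+// If the walk runs off the top of the stack first, GetStackInfo copies the active
+// frame into the return frame (see the SWA_DONE handling above).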
+
+
+//
+// Note that patches may be reallocated - do not keep a pointer to a patch.
+//
+DebuggerControllerPatch *DebuggerPatchTable::AddPatchForMethodDef(DebuggerController *controller,
+ Module *module,
+ mdMethodDef md,
+ size_t offset,
+ DebuggerPatchKind kind,
+ FramePointer fp,
+ AppDomain *pAppDomain,
+ SIZE_T masterEnCVersion,
+ DebuggerJitInfo *dji)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG( (LF_CORDB,LL_INFO10000,"DCP:AddPatchForMethodDef unbound "
+ "relative in methodDef 0x%x with dji 0x%x "
+ "controller:0x%x AD:0x%x\n", md,
+ dji, controller, pAppDomain));
+
+ DebuggerFunctionKey key;
+
+ key.module = module;
+ key.md = md;
+
+ // Get a new uninitialized patch object
+ DebuggerControllerPatch *patch =
+ (DebuggerControllerPatch *) Add(HashKey(&key));
+ if (patch == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+#ifndef _TARGET_ARM_
+ patch->Initialize();
+#endif
+
+ //initialize the patch data structure.
+ InitializePRD(&(patch->opcode));
+ patch->controller = controller;
+ patch->key.module = module;
+ patch->key.md = md;
+ patch->offset = offset;
+ patch->offsetIsIL = (kind == PATCH_KIND_IL_MASTER);
+ patch->address = NULL;
+ patch->fp = fp;
+ patch->trace.Bad_SetTraceType(DPT_DEFAULT_TRACE_TYPE); // TRACE_OTHER
+ patch->refCount = 1; // AddRef()
+ patch->fSaveOpcode = false;
+ patch->pAppDomain = pAppDomain;
+ patch->pid = m_pid++;
+
+ if (kind == PATCH_KIND_IL_MASTER)
+ {
+ _ASSERTE(dji == NULL);
+ patch->encVersion = masterEnCVersion;
+ }
+ else
+ {
+ patch->dji = dji;
+ }
+ patch->kind = kind;
+
+ if (dji)
+ LOG((LF_CORDB,LL_INFO10000,"AddPatchForMethodDef w/ version 0x%04x, "
+ "pid:0x%x\n", dji->m_encVersion, patch->pid));
+ else if (kind == PATCH_KIND_IL_MASTER)
+ LOG((LF_CORDB,LL_INFO10000,"AddPatchForMethodDef w/ version 0x%04x, "
+ "pid:0x%x\n", masterEnCVersion,patch->pid));
+ else
+ LOG((LF_CORDB,LL_INFO10000,"AddPatchForMethodDef w/ no dji or dmi, pid:0x%x\n",patch->pid));
+
+
+ // This patch is not yet bound or activated
+ _ASSERTE( !patch->IsBound() );
+ _ASSERTE( !patch->IsActivated() );
+
+ // The only kind of patch with IL offset is the IL master patch.
+ _ASSERTE(patch->IsILMasterPatch() || patch->offsetIsIL == FALSE);
+ return patch;
+}
+
+// Create and bind a patch to the specified address
+// The caller should immediately activate the patch since we typically expect bound patches
+// will always be activated.
+DebuggerControllerPatch *DebuggerPatchTable::AddPatchForAddress(DebuggerController *controller,
+ MethodDesc *fd,
+ size_t offset,
+ DebuggerPatchKind kind,
+ CORDB_ADDRESS_TYPE *address,
+ FramePointer fp,
+ AppDomain *pAppDomain,
+ DebuggerJitInfo *dji,
+ SIZE_T pid,
+ TraceType traceType)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+
+ _ASSERTE(kind == PATCH_KIND_NATIVE_MANAGED || kind == PATCH_KIND_NATIVE_UNMANAGED);
+ LOG((LF_CORDB,LL_INFO10000,"DCP:AddPatchForAddress bound "
+ "absolute to 0x%x with dji 0x%x (mdDef:0x%x) "
+ "controller:0x%x AD:0x%x\n",
+ address, dji, (fd!=NULL?fd->GetMemberDef():0), controller,
+ pAppDomain));
+
+ // get new uninitialized patch object
+ DebuggerControllerPatch *patch =
+ (DebuggerControllerPatch *) Add(HashAddress(address));
+
+ if (patch == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+#ifndef _TARGET_ARM_
+ patch->Initialize();
+#endif
+
+ // initialize the patch data structure
+ InitializePRD(&(patch->opcode));
+ patch->controller = controller;
+
+ if (fd == NULL)
+ {
+ patch->key.module = NULL;
+ patch->key.md = mdTokenNil;
+ }
+ else
+ {
+ patch->key.module = g_pEEInterface->MethodDescGetModule(fd);
+ patch->key.md = fd->GetMemberDef();
+ }
+ patch->offset = offset;
+ patch->offsetIsIL = FALSE;
+ patch->address = address;
+ patch->fp = fp;
+ patch->trace.Bad_SetTraceType(traceType);
+ patch->refCount = 1; // AddRef()
+ patch->fSaveOpcode = false;
+ patch->pAppDomain = pAppDomain;
+ if (pid == DCP_PID_INVALID)
+ patch->pid = m_pid++;
+ else
+ patch->pid = pid;
+
+ patch->dji = dji;
+ patch->kind = kind;
+
+ if (dji == NULL)
+ LOG((LF_CORDB,LL_INFO10000,"AddPatchForAddress w/ version with no dji, pid:0x%x\n", patch->pid));
+ else
+ {
+ LOG((LF_CORDB,LL_INFO10000,"AddPatchForAddress w/ version 0x%04x, "
+ "pid:0x%x\n", dji->m_methodInfo->GetCurrentEnCVersion(), patch->pid));
+
+ _ASSERTE( fd==NULL || fd == dji->m_fd );
+ }
+
+ SortPatchIntoPatchList(&patch);
+
+ // This patch is bound but not yet activated
+ _ASSERTE( patch->IsBound() );
+ _ASSERTE( !patch->IsActivated() );
+
+ // The only kind of patch with IL offset is the IL master patch.
+ _ASSERTE(patch->IsILMasterPatch() || patch->offsetIsIL == FALSE);
+ return patch;
+}
+
+// Set the native address for this patch.
+void DebuggerPatchTable::BindPatch(DebuggerControllerPatch *patch, CORDB_ADDRESS_TYPE *address)
+{
+ _ASSERTE(patch != NULL);
+ _ASSERTE(address != NULL);
+ _ASSERTE( !patch->IsILMasterPatch() );
+ _ASSERTE(!patch->IsBound() );
+
+ //Since the actual patch doesn't move, we don't have to worry about
+    //zeroing out the opcode field (see lengthy comment above)
+ // Since the patch is double-hashed based off Address, if we change the address,
+ // we must remove and reinsert the patch.
+ CHashTable::Delete(HashKey(&patch->key), ItemIndex((HASHENTRY*)patch));
+
+ patch->address = address;
+
+ CHashTable::Add(HashAddress(address), ItemIndex((HASHENTRY*)patch));
+
+ SortPatchIntoPatchList(&patch);
+
+ _ASSERTE(patch->IsBound() );
+ _ASSERTE(!patch->IsActivated() );
+}
+
+// Disassociate a patch from a specific code address.
+void DebuggerPatchTable::UnbindPatch(DebuggerControllerPatch *patch)
+{
+ _ASSERTE(patch != NULL);
+ _ASSERTE(patch->kind != PATCH_KIND_IL_MASTER);
+ _ASSERTE(patch->IsBound() );
+ _ASSERTE(!patch->IsActivated() );
+
+ //<REVISIT_TODO>@todo We're hosed if the patch hasn't been primed with
+ // this info & we can't get it...</REVISIT_TODO>
+ if (patch->key.module == NULL ||
+ patch->key.md == mdTokenNil)
+ {
+ MethodDesc *fd = g_pEEInterface->GetNativeCodeMethodDesc(
+ dac_cast<PCODE>(patch->address));
+ _ASSERTE( fd != NULL );
+ patch->key.module = g_pEEInterface->MethodDescGetModule(fd);
+ patch->key.md = fd->GetMemberDef();
+ }
+
+    // Update its index entry in the table to use its unbound key
+ // Since the patch is double-hashed based off Address, if we change the address,
+ // we must remove and reinsert the patch.
+ CHashTable::Delete( HashAddress(patch->address),
+ ItemIndex((HASHENTRY*)patch));
+
+ patch->address = NULL; // we're no longer bound to this address
+
+ CHashTable::Add( HashKey(&patch->key),
+ ItemIndex((HASHENTRY*)patch));
+
+ _ASSERTE(!patch->IsBound() );
+
+}
+
+void DebuggerPatchTable::RemovePatch(DebuggerControllerPatch *patch)
+{
+ // Since we're deleting this patch, it must not be activated (i.e. it must not have a stored opcode)
+ _ASSERTE( !patch->IsActivated() );
+#ifndef _TARGET_ARM_
+ patch->DoCleanup();
+#endif
+
+ //
+ // Because of the implementation of CHashTable, we can safely
+ // delete elements while iterating through the table. This
+ // behavior is relied upon - do not change to a different
+ // implementation without considering this fact.
+ //
+ Delete(Hash(patch), (HASHENTRY *) patch);
+
+}
+
+DebuggerControllerPatch *DebuggerPatchTable::GetNextPatch(DebuggerControllerPatch *prev)
+{
+ ULONG iNext;
+ HASHENTRY *psEntry;
+
+ // Start at the next entry in the chain.
+ // @todo - note that: EntryPtr(ItemIndex(x)) == x
+ iNext = EntryPtr(ItemIndex((HASHENTRY*)prev))->iNext;
+
+ // Search until we hit the end.
+ while (iNext != UINT32_MAX)
+ {
+ // Compare the keys.
+ psEntry = EntryPtr(iNext);
+
+ // Careful here... we can hash the entries in this table
+ // by two types of keys. In this type of search, the type
+ // of the second key (psEntry) does not necessarily
+ // indicate the type of the first key (prev), so we have
+ // to check for sure.
+ DebuggerControllerPatch *pc2 = (DebuggerControllerPatch*)psEntry;
+
+ if (((pc2->address == NULL) && (prev->address == NULL)) ||
+ ((pc2->address != NULL) && (prev->address != NULL)))
+ if (!Cmp(Key(prev), psEntry))
+ return pc2;
+
+ // Advance to the next item in the chain.
+ iNext = psEntry->iNext;
+ }
+
+ return NULL;
+}
+
+#ifdef _DEBUG_PATCH_TABLE
+ // DEBUG An internal debugging routine, it iterates
+ // through the hashtable, stopping at every
+ // single entry, no matter what it's state. For this to
+    // single entry, no matter what its state. For this to
+ // of this class to CHashTableAndData in
+ // to $\Com99\Src\inc\UtilCode.h
+void DebuggerPatchTable::CheckPatchTable()
+{
+ if (NULL != m_pcEntries)
+ {
+ DebuggerControllerPatch *dcp;
+ int i = 0;
+ while (i++ <m_iEntries)
+ {
+ dcp = (DebuggerControllerPatch*)&(((DebuggerControllerPatch *)m_pcEntries)[i]);
+ if (dcp->opcode != 0 )
+ {
+ LOG((LF_CORDB,LL_INFO1000, "dcp->addr:0x%8x "
+ "mdMD:0x%8x, offset:0x%x, native:%d\n",
+ dcp->address, dcp->key.md, dcp->offset,
+ dcp->IsNativePatch()));
+ }
+ }
+ }
+}
+
+#endif // _DEBUG_PATCH_TABLE
+
+// Count how many patches are in the table.
+// Use for asserts
+int DebuggerPatchTable::GetNumberOfPatches()
+{
+ int total = 0;
+
+ if (NULL != m_pcEntries)
+ {
+ DebuggerControllerPatch *dcp;
+ ULONG i = 0;
+
+ while (i++ <m_iEntries)
+ {
+ dcp = (DebuggerControllerPatch*)&(((DebuggerControllerPatch *)m_pcEntries)[i]);
+
+ if (dcp->IsActivated() || !dcp->IsFree())
+ total++;
+ }
+ }
+ return total;
+}
+
+#if defined(_DEBUG)
+//-----------------------------------------------------------------------------
+// Debug check that we only have 1 thread-starter per thread.
+// pNew - the new DTS. We'll make sure there's not already a DTS on this thread.
+//-----------------------------------------------------------------------------
+void DebuggerController::EnsureUniqueThreadStarter(DebuggerThreadStarter * pNew)
+{
+ // This lock should be safe to take since our base class ctor takes it.
+ ControllerLockHolder lockController;
+ DebuggerController * pExisting = g_controllers;
+ while(pExisting != NULL)
+ {
+ if (pExisting->GetDCType() == DEBUGGER_CONTROLLER_THREAD_STARTER)
+ {
+ if (pExisting != pNew)
+ {
+ // If we have 2 thread starters, they'd better be on different threads.
+ _ASSERTE((pExisting->GetThread() != pNew->GetThread()));
+ }
+ }
+ pExisting = pExisting->m_next;
+ }
+}
+#endif
+
+//-----------------------------------------------------------------------------
+// If we have a thread-starter on the given EE thread, make sure it's canceled.
+// Thread-Starters normally delete themselves when they fire. But if the EE
+// destroys the thread before it fires, then we'd still have an active DTS.
+//-----------------------------------------------------------------------------
+void DebuggerController::CancelOutstandingThreadStarter(Thread * pThread)
+{
+ _ASSERTE(pThread != NULL);
+ LOG((LF_CORDB, LL_EVERYTHING, "DC:CancelOutstandingThreadStarter - checking on thread =0x%p\n", pThread));
+
+ ControllerLockHolder lockController;
+ DebuggerController * p = g_controllers;
+ while(p != NULL)
+ {
+ if (p->GetDCType() == DEBUGGER_CONTROLLER_THREAD_STARTER)
+ {
+ if (p->GetThread() == pThread)
+ {
+                LOG((LF_CORDB, LL_EVERYTHING, "DC:CancelOutstandingThreadStarter, pThread=0x%p, Found=0x%p\n", pThread, p));
+
+ // There's only 1 DTS per thread, so once we find it, we can quit.
+ p->Delete();
+ p = NULL;
+ break;
+ }
+ }
+ p = p->m_next;
+ }
+ // The common case is that our DTS hit its patch and did a SendEvent (and
+ // deleted itself). So usually we'll get through the whole list w/o deleting anything.
+
+}
+
+//void DebuggerController::Initialize() Sets up the static
+// variables for the static DebuggerController class.
+// How: Initializes the critical section and the patch table
+HRESULT DebuggerController::Initialize()
+{
+ CONTRACT(HRESULT)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ // This can be called in an "early attach" case, so DebuggerIsInvolved()
+ // will be b/c we don't realize the debugger's attaching to us.
+        // will be false b/c we don't realize the debugger's attaching to us.
+ POSTCONDITION(CheckPointer(g_patches));
+ POSTCONDITION(RETVAL == S_OK);
+ }
+ CONTRACT_END;
+
+ if (g_patches == NULL)
+ {
+ ZeroMemory(&g_criticalSection, sizeof(g_criticalSection)); // Init() expects zero-init memory.
+
+ // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst.
+ // If you remove this flag, we will switch to preemptive mode when entering
+ // g_criticalSection, which means all functions that enter it will become
+ // GC_TRIGGERS. (This includes all uses of ControllerLockHolder.) So be sure
+ // to update the contracts if you remove this flag.
+ g_criticalSection.Init(CrstDebuggerController,
+ (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_REENTRANCY | CRST_DEBUGGER_THREAD));
+
+ g_patches = new (interopsafe) DebuggerPatchTable();
+ _ASSERTE(g_patches != NULL); // throws on oom
+
+ HRESULT hr = g_patches->Init();
+
+ if (FAILED(hr))
+ {
+ DeleteInteropSafe(g_patches);
+ ThrowHR(hr);
+ }
+
+ g_patchTableValid = TRUE;
+ TRACE_ALLOC(g_patches);
+ }
+
+ _ASSERTE(g_patches != NULL);
+
+ RETURN (S_OK);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Constructor for a controller
+//
+// Arguments:
+// pThread - thread that controller has affinity to. NULL if no thread - affinity.
+// pAppdomain - appdomain that controller has affinity to. NULL if no AD affinity.
+//
+//
+// Notes:
+// "Affinity" is per-controller specific. Affinity is generally passed on to
+// any patches the controller creates. So if a controller has affinity to Thread X,
+// then any patches it creates will only fire on Thread-X.
+//
+//---------------------------------------------------------------------------------------
+
+DebuggerController::DebuggerController(Thread * pThread, AppDomain * pAppDomain)
+ : m_pAppDomain(pAppDomain),
+ m_thread(pThread),
+ m_singleStep(false),
+ m_exceptionHook(false),
+ m_traceCall(0),
+ m_traceCallFP(ROOT_MOST_FRAME),
+ m_unwindFP(LEAF_MOST_FRAME),
+ m_eventQueuedCount(0),
+ m_deleted(false),
+ m_fEnableMethodEnter(false)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ CONSTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "DC: 0x%x m_eventQueuedCount to 0 - DC::DC\n", this));
+ ControllerLockHolder lockController;
+ {
+ m_next = g_controllers;
+ g_controllers = this;
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// DebuggerController::DeleteAllControllers - deletes all debugger controllers
+//
+// Arguments:
+// None
+//
+// Return Value:
+// None
+//
+// Notes:
+// This is used at detach time to remove all DebuggerControllers. This will remove all
+// patches and do whatever other cleanup individual DebuggerControllers consider
+// necessary to allow the debugger to detach and the process to run normally.
+//
+
+void DebuggerController::DeleteAllControllers()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ControllerLockHolder lockController;
+ DebuggerController * pDebuggerController = g_controllers;
+ DebuggerController * pNextDebuggerController = NULL;
+
+ while (pDebuggerController != NULL)
+ {
+ pNextDebuggerController = pDebuggerController->m_next;
+ pDebuggerController->Delete();
+ pDebuggerController = pNextDebuggerController;
+ }
+}
+
+DebuggerController::~DebuggerController()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ DESTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ ControllerLockHolder lockController;
+
+ _ASSERTE(m_eventQueuedCount == 0);
+
+ DisableAll();
+
+ //
+ // Remove controller from list
+ //
+
+ DebuggerController **c;
+
+ c = &g_controllers;
+ while (*c != this)
+ c = &(*c)->m_next;
+
+ *c = m_next;
+
+}
+
+// void DebuggerController::Delete()
+// What: Marks an instance as deletable. If it's ref count
+// (see Enqueue, Dequeue) is currently zero, it actually gets deleted
+// How: Set m_deleted to true. If m_eventQueuedCount==0, delete this
+void DebuggerController::Delete()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_eventQueuedCount == 0)
+ {
+ LOG((LF_CORDB|LF_ENC, LL_INFO100000, "DC::Delete: actual delete of this:0x%x!\n", this));
+ TRACE_FREE(this);
+ DeleteInteropSafe(this);
+ }
+ else
+ {
+ LOG((LF_CORDB|LF_ENC, LL_INFO100000, "DC::Delete: marked for "
+ "future delete of this:0x%x!\n", this));
+ LOG((LF_CORDB|LF_ENC, LL_INFO10000, "DC:0x%x m_eventQueuedCount at 0x%x\n",
+ this, m_eventQueuedCount));
+ m_deleted = true;
+ }
+}
+
+//static
+void DebuggerController::AddRef(DebuggerControllerPatch *patch)
+{
+ patch->refCount++;
+}
+
+//static
+void DebuggerController::Release(DebuggerControllerPatch *patch)
+{
+ patch->refCount--;
+ if (patch->refCount == 0)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DCP::R: patch deleted, deactivating\n"));
+ DeactivatePatch(patch);
+ GetPatchTable()->RemovePatch(patch);
+ }
+}
+
+// void DebuggerController::DisableAll() DisableAll removes
+// all control from the controller. This includes all patches & page
+// protection. This will invoke Disable* for unwind,singlestep,
+// exceptionHook, and tracecall. It will also go through the patch table &
+// attempt to remove any and all patches that belong to this controller.
+// If the patch is currently triggering, then a Dispatch* method expects the
+// patch to be there after we return, so we instead simply mark the patch
+// itself as deleted.
+void DebuggerController::DisableAll()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO1000, "DC::DisableAll\n"));
+ _ASSERTE(g_patches != NULL);
+
+ ControllerLockHolder ch;
+ {
+ //
+ // Remove controller's patches from list.
+ // Don't do this on shutdown because the shutdown thread may have killed another thread asynchronously
+ // thus leaving the patchtable in an inconsistent state such that we may fail trying to walk it.
+ // Since we're exiting anyways, leaving int3 in the code can't harm anybody.
+ //
+ if (!g_fProcessDetach)
+ {
+ HASHFIND f;
+ for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f);
+ patch != NULL;
+ patch = g_patches->GetNextPatch(&f))
+ {
+ if (patch->controller == this)
+ {
+ Release(patch);
+ }
+ }
+ }
+
+ if (m_singleStep)
+ DisableSingleStep();
+ if (m_exceptionHook)
+ DisableExceptionHook();
+ if (m_unwindFP != LEAF_MOST_FRAME)
+ DisableUnwind();
+ if (m_traceCall)
+ DisableTraceCall();
+ if (m_fEnableMethodEnter)
+ DisableMethodEnter();
+ }
+}
+
+// void DebuggerController::Enqueue() What: Does
+// reference counting so we don't toast a
+// DebuggerController while it's in a Dispatch queue.
+// Why: In DispatchPatchOrSingleStep, we can't hold locks when going
+// into PreEmptiveGC mode b/c we'll create a deadlock.
+// So we have to UnLock() prior to
+// EnablePreEmptiveGC(). But somebody else can show up and delete the
+// DebuggerControllers since we no longer have the lock. So we have to
+// do this reference counting thing to make sure that the controllers
+// don't get toasted as we're trying to invoke SendEvent on them. We have to
+// reacquire the lock before invoking Dequeue because Dequeue may
+// result in the controller being deleted, which would change the global
+// controller list.
+// How: increment m_eventQueuedCount
+void DebuggerController::Enqueue()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_eventQueuedCount++;
+ LOG((LF_CORDB, LL_INFO10000, "DC::Enq DC:0x%x m_eventQueuedCount at 0x%x\n",
+ this, m_eventQueuedCount));
+}
+
+// void DebuggerController::Dequeue() What: Does
+// reference counting so we don't toast a
+// DebuggerController while it's in a Dispatch queue.
+// How: decrement m_eventQueuedCount; delete this if
+// m_eventQueuedCount == 0 AND m_deleted has been set to true
+void DebuggerController::Dequeue()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "DC::Deq DC:0x%x m_eventQueuedCount at 0x%x\n",
+ this, m_eventQueuedCount));
+ if (--m_eventQueuedCount == 0)
+ {
+ if (m_deleted)
+ {
+ TRACE_FREE(this);
+ DeleteInteropSafe(this);
+ }
+ }
+}
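+// The Enqueue/Dequeue discipline described above boils down to this dispatch pattern
+// (a sketch of the sequence; see DispatchPatchOrSingleStep for the real code):
+//
+//   controller->Enqueue();      // pin the controller before dropping the lock
+//   // ... release the controller lock, switch GC mode, call SendEvent ...
+//   // ... reacquire the controller lock ...
+//   controller->Dequeue();      // may delete the controller if Delete() ran meanwhile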
+
+
+// bool DebuggerController::BindPatch() If the method has
+// been JITted and isn't hashed by address already, then hash
+// it into the hashtable by address and not DebuggerFunctionKey.
+// If the patch->address field is nonzero, we're done.
+// Otherwise ask g_pEEInterface to FindLoadedMethodRefOrDef, then
+// GetFunctionAddress of the method, if the method is in IL,
+// MapILOffsetToNative. If everything else went Ok, we can now invoke
+// g_patches->BindPatch.
+// Returns: false if we know that we can't bind the patch immediately.
+// true if we either can bind the patch right now, or can't right now,
+// but might be able to in the future (eg, the method hasn't been JITted)
+
+// Have following outcomes:
+// 1) Succeeded in binding the patch to a raw address. patch->address is set.
+// (Note we still must apply the patch to put the int 3 in.)
+// returns true, *pFail = false
+//
+// 2) Fails to bind, but a future attempt may succeed. Obvious ex, for an IL-only
+// patch on an unjitted method.
+// returns false, *pFail = false
+//
+// 3) Fails to bind because something's wrong. Ex: bad IL offset, no DJI to do a
+// mapping with. Future calls will fail too.
+// returns false, *pFail = true
+bool DebuggerController::BindPatch(DebuggerControllerPatch *patch,
+ MethodDesc *fd,
+ CORDB_ADDRESS_TYPE *startAddr)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS; // from GetJitInfo
+ GC_NOTRIGGER;
+ MODE_ANY; // don't really care what mode we're in.
+
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(patch != NULL);
+ _ASSERTE(!patch->IsILMasterPatch());
+ _ASSERTE(fd != NULL);
+
+ //
+ // Translate patch to address, if it hasn't been already.
+ //
+
+ if (patch->address != NULL)
+ {
+ return true;
+ }
+
+ if (startAddr == NULL)
+ {
+ if (patch->HasDJI() && patch->GetDJI()->m_jitComplete)
+ {
+ startAddr = (CORDB_ADDRESS_TYPE *) CORDB_ADDRESS_TO_PTR(patch->GetDJI()->m_addrOfCode);
+ _ASSERTE(startAddr != NULL);
+ }
+ if (startAddr == NULL)
+ {
+ // Should not be trying to place patches on MethodDescs for stubs.
+ // These stubs will never get jitted.
+ CONSISTENCY_CHECK_MSGF(!fd->IsWrapperStub(), ("Can't place patch at stub md %p, %s::%s",
+ fd, fd->m_pszDebugClassName, fd->m_pszDebugMethodName));
+
+ startAddr = (CORDB_ADDRESS_TYPE *)g_pEEInterface->GetFunctionAddress(fd);
+ //
+ // Code is not available yet to patch. The prestub should
+ // notify us when it is executed.
+ //
+ if (startAddr == NULL)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DC::BP:Patch at 0x%x not bindable yet.\n", patch->offset));
+
+ return false;
+ }
+ }
+ }
+
+ _ASSERTE(!g_pEEInterface->IsStub((const BYTE *)startAddr));
+
+ // If we've jitted, map to a native offset.
+ DebuggerJitInfo *info = g_pDebugger->GetJitInfo(fd, (const BYTE *)startAddr);
+
+#ifdef LOGGING
+ if (info == NULL)
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DC::BindPa: For startAddr 0x%x, didn't find a DJI\n", startAddr));
+ }
+#endif //LOGGING
+ if (info != NULL)
+ {
+ // There is a strange case with prejitted code and unjitted trace patches. We can enter this function
+ // with no DebuggerJitInfo created, then have the call just above this actually create the
+ // DebuggerJitInfo, which causes JitComplete to be called, which causes all patches to be bound! If this
+ // happens, then we don't need to continue here (it's already been done recursively) and we don't need to
+ // re-activate the patch, so we return false from right here. We can check this by seeing if we suddenly
+ // have the address in the patch set.
+ if (patch->address != NULL)
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DC::BindPa: patch bound recursivley by GetJitInfo, bailing...\n"));
+ return false;
+ }
+
+ LOG((LF_CORDB,LL_INFO10000, "DC::BindPa: For startAddr 0x%x, got DJI "
+ "0x%x, from 0x%x size: 0x%x\n", startAddr, info, info->m_addrOfCode, info->m_sizeOfCode));
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "DC::BP:Trying to bind patch in %s::%s version %d\n",
+ fd->m_pszDebugClassName, fd->m_pszDebugMethodName, info ? info->m_encVersion : (SIZE_T)-1));
+
+ _ASSERTE(g_patches != NULL);
+
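+ // Convert the patch's native offset into an absolute address in this method's code region
+ // and record the binding in the patch table.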
+ CORDB_ADDRESS_TYPE *addr = (CORDB_ADDRESS_TYPE *)
+ CodeRegionInfo::GetCodeRegionInfo(NULL, NULL, startAddr).OffsetToAddress(patch->offset);
+ g_patches->BindPatch(patch, addr);
+
+ LOG((LF_CORDB, LL_INFO10000, "DC::BP:Binding patch at 0x%x(off:%x)\n", addr, patch->offset));
+
+ return true;
+}
+
+// bool DebuggerController::ApplyPatch() applies
+// the patch described to the code, and
+// remembers the replaced opcode. Note that the same address
+// cannot be patched twice at the same time.
+// Grabs the opcode & stores in patch, then sets a break
+// instruction for either native or IL.
+// VirtualProtect & some macros. Returns false if anything
+// went bad.
+// DebuggerControllerPatch *patch: The patch, indicates where
+// to set the INT3 instruction
+// Returns: true if the user break instruction was successfully
+// placed into the code-stream, false otherwise
+bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DC::ApplyPatch at addr 0x%p\n",
+ patch->address));
+
+ // If we try to apply an already applied patch, we'll override our saved opcode
+ // with the break opcode and end up getting a break in our patch bypass buffer.
+ _ASSERTE(!patch->IsActivated() );
+ _ASSERTE(patch->IsBound());
+
+ // Note we may be patching at certain "blessed" points in mscorwks.
+ // This is very dangerous b/c we can't be sure patch->Address is blessed or not.
+
+
+ //
+ // Apply the patch.
+ //
+ _ASSERTE(!(g_pConfig->GetGCStressLevel() & (EEConfig::GCSTRESS_INSTR_JIT|EEConfig::GCSTRESS_INSTR_NGEN))
+ && "Debugger does not work with GCSTRESS 4");
+
+ if (patch->IsNativePatch())
+ {
+ if (patch->fSaveOpcode)
+ {
+ // We only used SaveOpcode for when we've moved code, so
+ // the patch should already be there.
+ patch->opcode = patch->opcodeSaved;
+ _ASSERTE( AddressIsBreakpoint(patch->address) );
+ return true;
+ }
+
+#if _DEBUG
+ VerifyExecutableAddress((BYTE*)patch->address);
+#endif
+
+ LPVOID baseAddress = (LPVOID)(patch->address);
+
+ DWORD oldProt;
+
+ if (!VirtualProtect(baseAddress,
+ CORDbg_BREAK_INSTRUCTION_SIZE,
+ PAGE_EXECUTE_READWRITE, &oldProt))
+ {
+ _ASSERTE(!"VirtualProtect of code page failed");
+ return false;
+ }
+
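+ // Save the original instruction in the patch, then overwrite it with the break instruction.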
+ patch->opcode = CORDbgGetInstruction(patch->address);
+
+ CORDbgInsertBreakpoint((CORDB_ADDRESS_TYPE *)patch->address);
+ LOG((LF_CORDB, LL_EVERYTHING, "Breakpoint was inserted\n"));
+
+ if (!VirtualProtect(baseAddress,
+ CORDbg_BREAK_INSTRUCTION_SIZE,
+ oldProt, &oldProt))
+ {
+ _ASSERTE(!"VirtualProtect of code page failed");
+ return false;
+ }
+ }
+// TODO: : determine if this is needed for AMD64
+#if defined(_TARGET_X86_) //REVISIT_TODO what is this?!
+ else
+ {
+ DWORD oldProt;
+
+ //
+ // !!! IL patch logic assumes reference instruction encoding
+ //
+ if (!VirtualProtect((void *) patch->address, 2,
+ PAGE_EXECUTE_READWRITE, &oldProt))
+ {
+ _ASSERTE(!"VirtualProtect of code page failed");
+ return false;
+ }
+ patch->opcode =
+ (unsigned int) *(unsigned short*)(patch->address+1);
+
+ _ASSERTE(patch->opcode != CEE_BREAK);
+
+ *(unsigned short *) (patch->address+1) = CEE_BREAK;
+
+ if (!VirtualProtect((void *) patch->address, 2, oldProt, &oldProt))
+ {
+ _ASSERTE(!"VirtualProtect of code page failed");
+ return false;
+ }
+ }
+#endif //_TARGET_X86_
+
+ return true;
+}
+
+// bool DebuggerController::UnapplyPatch()
+// UnapplyPatch removes the patch described by the patch.
+// (CopyOpcodeFromAddrToPatch, in reverse.)
+// Looks a lot like CopyOpcodeFromAddrToPatch, except that we use a macro to
+// copy the instruction back to the code-stream & immediately set the
+// opcode field to 0 so ReadMemory,WriteMemory will work right.
+// Note that it's very important to zero out the opcode field, as it
+// is used by the right side to determine if a patch is
+// valid or not.
+// NO LOCKING
+// DebuggerControllerPatch * patch: Patch to remove
+// Returns: true if the patch was unapplied, false otherwise
+bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch)
+{
+ _ASSERTE(patch->address != NULL);
+ _ASSERTE(patch->IsActivated() );
+
+ LOG((LF_CORDB,LL_INFO1000, "DC::UP unapply patch at addr 0x%p\n",
+ patch->address));
+
+ if (patch->IsNativePatch())
+ {
+ if (patch->fSaveOpcode)
+ {
+ // We're doing this for MoveCode, and we don't want to
+ // overwrite something if we don't get moved far enough.
+ patch->opcodeSaved = patch->opcode;
+ InitializePRD(&(patch->opcode));
+ _ASSERTE( !patch->IsActivated() );
+ return true;
+ }
+
+ LPVOID baseAddress = (LPVOID)(patch->address);
+
+ DWORD oldProt;
+
+ if (!VirtualProtect(baseAddress,
+ CORDbg_BREAK_INSTRUCTION_SIZE,
+ PAGE_EXECUTE_READWRITE, &oldProt))
+ {
+ //
+ // We may be trying to remove a patch from memory
+ // which has been unmapped. We can ignore the
+ // error in this case.
+ //
+ InitializePRD(&(patch->opcode));
+ return false;
+ }
+
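+ // Write the original instruction back over the break instruction.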
+ CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)patch->address, patch->opcode);
+
+ //VERY IMPORTANT to zero out opcode, else we might mistake
+ //this patch for an active one on ReadMem/WriteMem (see
+ //header file comment)
+ InitializePRD(&(patch->opcode));
+
+ if (!VirtualProtect(baseAddress,
+ CORDbg_BREAK_INSTRUCTION_SIZE,
+ oldProt, &oldProt))
+ {
+ _ASSERTE(!"VirtualProtect of code page failed");
+ return false;
+ }
+ }
+ else
+ {
+ DWORD oldProt;
+
+ if (!VirtualProtect((void *) patch->address, 2,
+ PAGE_EXECUTE_READWRITE, &oldProt))
+ {
+ //
+ // We may be trying to remove a patch from memory
+ // which has been unmapped. We can ignore the
+ // error in this case.
+ //
+ InitializePRD(&(patch->opcode));
+ return false;
+ }
+
+ //
+ // !!! IL patch logic assumes reference encoding
+ //
+// TODO: : determine if this is needed for AMD64
+#if defined(_TARGET_X86_)
+ _ASSERTE(*(unsigned short*)(patch->address+1) == CEE_BREAK);
+
+ *(unsigned short *) (patch->address+1)
+ = (unsigned short) patch->opcode;
+#endif //this makes no sense on anything but X86
+ //VERY IMPORTANT to zero out opcode, else we might mistake
+ //this patch for an active one on ReadMem/WriteMem (see
+ //header file comment)
+ InitializePRD(&(patch->opcode));
+
+ if (!VirtualProtect((void *) patch->address, 2, oldProt, &oldProt))
+ {
+ _ASSERTE(!"VirtualProtect of code page failed");
+ return false;
+ }
+ }
+
+ _ASSERTE( !patch->IsActivated() );
+ _ASSERTE( patch->IsBound() );
+ return true;
+}
+
+// void DebuggerController::UnapplyPatchAt()
+// NO LOCKING
+// UnapplyPatchAt removes the patch from a copy of the patched code.
+// Like UnapplyPatch, except that we don't bother checking
+// memory permissions, but instead replace the breakpoint instruction
+// with the opcode at an arbitrary memory address.
+void DebuggerController::UnapplyPatchAt(DebuggerControllerPatch *patch,
+ CORDB_ADDRESS_TYPE *address)
+{
+ _ASSERTE(patch->IsBound() );
+
+ if (patch->IsNativePatch())
+ {
+ CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)address, patch->opcode);
+ //note that we don't have to zero out opcode field
+ //since we're unapplying at something other than
+ //the original spot. We assert this is true:
+ _ASSERTE( patch->address != address );
+ }
+ else
+ {
+ //
+ // !!! IL patch logic assumes reference encoding
+ //
+// TODO: : determine if this is needed for AMD64
+#ifdef _TARGET_X86_
+ _ASSERTE(*(unsigned short*)(address+1) == CEE_BREAK);
+
+ *(unsigned short *) (address+1)
+ = (unsigned short) patch->opcode;
+ _ASSERTE( patch->address != address );
+#endif // this makes no sense on anything but X86
+ }
+}
+
+// bool DebuggerController::IsPatched() Is there a patch at addr?
+// How: if fNative && the instruction at addr is the break
+// instruction for this platform.
+bool DebuggerController::IsPatched(CORDB_ADDRESS_TYPE *address, BOOL native)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (native)
+ {
+ return AddressIsBreakpoint(address);
+ }
+ else
+ return false;
+}
+
+// DWORD DebuggerController::GetPatchedOpcode() Gets the opcode
+// at addr, 'looking underneath' any patches if needed.
+// GetPatchedInstruction is a function for the EE to call to "see through"
+// a patch to the opcode that was patched.
+// How: Lock() grab opcode directly unless there's a patch, in
+// which case grab it out of the patch table.
+// BYTE * address: The address that we want to 'see through'
+// Returns: DWORD value, that is the opcode that should really be there,
+// if we hadn't placed a patch there. If we haven't placed a patch
+// there, then we'll see the actual opcode at that address.
+PRD_TYPE DebuggerController::GetPatchedOpcode(CORDB_ADDRESS_TYPE *address)
+{
+ _ASSERTE(g_patches != NULL);
+
+ PRD_TYPE opcode;
+ ZeroMemory(&opcode, sizeof(opcode));
+
+ ControllerLockHolder lockController;
+
+ //
+ // Look for a patch at the address
+ //
+
+ DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)address);
+
+ if (patch != NULL)
+ {
+ // Since we got the patch at this address, it must by definition be bound to that address
+ _ASSERTE( patch->IsBound() );
+ _ASSERTE( patch->address == address );
+ // If we're going to be returning its opcode, then the patch must also be activated
+ _ASSERTE( patch->IsActivated() );
+ opcode = patch->opcode;
+ }
+ else
+ {
+ //
+ // Patch was not found - it either is not our patch, or it has
+ // just been removed. In either case, just return the current
+ // opcode.
+ //
+
+ if (g_pEEInterface->IsManagedNativeCode((const BYTE *)address))
+ {
+ opcode = CORDbgGetInstruction((CORDB_ADDRESS_TYPE *)address);
+ }
+// <REVISIT_TODO>
+// TODO: : determine if this is needed for AMD64
+// </REVISIT_TODO>
+#ifdef _TARGET_X86_ //what is this?!
+ else
+ {
+ //
+ // !!! IL patch logic assumes reference encoding
+ //
+
+ opcode = *(unsigned short*)(address+1);
+ }
+#endif //_TARGET_X86_
+
+ }
+
+ return opcode;
+}
+
+// Holding the controller lock, this will check if an address is patched,
+// and if so will then set the PRD_TYPE out parameter to the unpatched value.
+BOOL DebuggerController::CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address,
+ /*OUT*/ PRD_TYPE *pOpcode)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE; // take Controller lock.
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_patches != NULL);
+
+ BOOL res;
+
+ ControllerLockHolder lockController;
+
+ //
+ // Look for a patch at the address
+ //
+
+ if (IsAddressPatched(address))
+ {
+ *pOpcode = GetPatchedOpcode(address);
+ res = TRUE;
+ }
+ else
+ {
+ InitializePRD(pOpcode);
+ res = FALSE;
+ }
+
+
+ return res;
+}
+
+// void DebuggerController::ActivatePatch() Place a breakpoint
+// so that threads will trip over this patch.
+// If there are any patches at the address already, then copy
+// their opcode into this one & return. Otherwise,
+// call ApplyPatch(patch). There is an implicit list of patches at this
+// address by virtue of the fact that we can iterate through all the
+// patches in the patch table with the same address.
+// DebuggerControllerPatch *patch: The patch to activate
+/* static */ void DebuggerController::ActivatePatch(DebuggerControllerPatch *patch)
+{
+ _ASSERTE(g_patches != NULL);
+ _ASSERTE(patch != NULL);
+ _ASSERTE(patch->IsBound() );
+ _ASSERTE(!patch->IsActivated() );
+
+ bool fApply = true;
+
+ //
+ // See if we already have an active patch at this address.
+ //
+ for (DebuggerControllerPatch *p = g_patches->GetPatch(patch->address);
+ p != NULL;
+ p = g_patches->GetNextPatch(p))
+ {
+ if (p != patch)
+ {
+ // If we're going to skip activating 'patch' because 'p' already exists at the same address
+ // then 'p' must be activated. We expect that all bound patches are activated.
+ _ASSERTE( p->IsActivated() );
+ patch->opcode = p->opcode;
+ fApply = false;
+ break;
+ }
+ }
+
+ //
+ // This is the only patch at this address - apply the patch
+ // to the code.
+ //
+ if (fApply)
+ {
+ ApplyPatch(patch);
+ }
+
+ _ASSERTE(patch->IsActivated() );
+}
+
+// void DebuggerController::DeactivatePatch() Make sure that a
+// patch won't be hit.
+// How: If this patch is the last one at this address, then
+// UnapplyPatch. The caller should then invoke RemovePatch to remove the
+// patch from the patch table.
+// DebuggerControllerPatch *patch: Patch to deactivate
+void DebuggerController::DeactivatePatch(DebuggerControllerPatch *patch)
+{
+ _ASSERTE(g_patches != NULL);
+
+ if( !patch->IsBound() ) {
+ // patch is not bound, nothing to do
+ return;
+ }
+
+ // We expect that all bound patches are also activated.
+ // One exception to this is if the shutdown thread killed another thread right after
+ // it deactivated a patch but before it got to remove it.
+ _ASSERTE(patch->IsActivated() );
+
+ bool fUnapply = true;
+
+ //
+ // See if there is another active patch at this address.
+ //
+ for (DebuggerControllerPatch *p = g_patches->GetPatch(patch->address);
+ p != NULL;
+ p = g_patches->GetNextPatch(p))
+ {
+ if (p != patch)
+ {
+ // There is another patch at this address, so don't remove it
+ // However, clear the patch data so that we no longer consider this particular patch activated
+ fUnapply = false;
+ InitializePRD(&(patch->opcode));
+ break;
+ }
+ }
+
+ if (fUnapply)
+ {
+ UnapplyPatch(patch);
+ }
+
+ _ASSERTE(!patch->IsActivated() );
+
+ //
+ // Patch must now be removed from the table.
+ //
+}
+
+// AddILMasterPatch: record a patch on IL code but do not bind it or activate it. The master b.p.
+// is associated with a module/token pair. It is used later
+// (e.g. in MapAndBindFunctionPatches) to create one or more "slave"
+// breakpoints which are associated with particular MethodDescs/JitInfos.
+//
+// Rationale: For generic code a single IL patch (e.g. a breakpoint)
+// may give rise to several patches, one for each JITting of
+// the IL (i.e. generic code may be JITted multiple times for
+// different instantiations).
+//
+// So we keep one patch which describes
+// the breakpoint but which is never actually bound or activated.
+// This is then used to apply new "slave" patches to all copies of
+// JITted code associated with the method.
+//
+// <REVISIT_TODO>In theory we could bind and apply the master patch when the
+// code is known not to be generic (as used to happen to all breakpoint
+// patches in V1). However this seems like a premature
+// optimization.</REVISIT_TODO>
+DebuggerControllerPatch *DebuggerController::AddILMasterPatch(Module *module,
+ mdMethodDef md,
+ SIZE_T offset,
+ SIZE_T encVersion)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_patches != NULL);
+
+ ControllerLockHolder ch;
+
+
+ DebuggerControllerPatch *patch = g_patches->AddPatchForMethodDef(this,
+ module,
+ md,
+ offset,
+ PATCH_KIND_IL_MASTER,
+ LEAF_MOST_FRAME,
+ NULL,
+ encVersion,
+ NULL);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DC::AP: Added IL master patch 0x%x for md 0x%x at offset %d encVersion %d\n", patch, md, offset, encVersion));
+
+ return patch;
+}
+
+// See notes above on AddILMasterPatch
+BOOL DebuggerController::AddBindAndActivateILSlavePatch(DebuggerControllerPatch *master,
+ DebuggerJitInfo *dji)
+{
+ _ASSERTE(g_patches != NULL);
+ _ASSERTE(master->IsILMasterPatch());
+ _ASSERTE(dji != NULL);
+
+ // Do not dereference the "master" pointer in the loop! The loop may add more patches,
+ // causing the patch table to grow and move.
+ BOOL result = FALSE;
+ SIZE_T masterILOffset = master->offset;
+
+ // Loop through all the native offsets mapped to the given IL offset. On x86 the mapping
+ // should be 1:1. On WIN64, because there are funclets, we can have a 1:N mapping.
+ DebuggerJitInfo::ILToNativeOffsetIterator it;
+ for (dji->InitILToNativeOffsetIterator(it, masterILOffset); !it.IsAtEnd(); it.Next())
+ {
+ BOOL fExact;
+ SIZE_T offsetNative = it.Current(&fExact);
+
+ // We special case offset 0, which is when a breakpoint is set
+ // at the beginning of a method that hasn't been jitted yet. In
+ // that case it's possible that offset 0 has been optimized out,
+ // but we still want to set the closest breakpoint to that.
+ if (!fExact && (masterILOffset != 0))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DC::BP:Failed to bind patch at IL offset 0x%p in %s::%s\n",
+ masterILOffset, dji->m_fd->m_pszDebugClassName, dji->m_fd->m_pszDebugMethodName));
+
+ continue;
+ }
+ else
+ {
+ result = TRUE;
+ }
+
+ INDEBUG(BOOL fOk = )
+ AddBindAndActivatePatchForMethodDesc(dji->m_fd, dji,
+ offsetNative, PATCH_KIND_IL_SLAVE,
+ LEAF_MOST_FRAME, m_pAppDomain);
+ _ASSERTE(fOk);
+ }
+
+ // As long as we have successfully bound at least one patch, we consider the operation successful.
+ return result;
+}
+
+
+
+// This routine places a patch that is conceptually a patch on the IL code.
+// The IL code may be jitted multiple times, e.g. due to generics.
+// This routine ensures that both present and subsequent JITtings of code will
+// also be patched.
+//
+// This routine will return FALSE only if we will _never_ be able to
+// place the patch in any native code corresponding to the given offset.
+// Otherwise it will:
+// (a) record a "master" patch
+// (b) apply as many slave patches as it can to existing copies of code
+// that have debugging information
+BOOL DebuggerController::AddILPatch(AppDomain * pAppDomain, Module *module,
+ mdMethodDef md,
+ SIZE_T encVersion, // what encVersion does this apply to?
+ SIZE_T offset)
+{
+ _ASSERTE(g_patches != NULL);
+ _ASSERTE(md != NULL);
+ _ASSERTE(module != NULL);
+
+ BOOL fOk = FALSE;
+
+ DebuggerMethodInfo *dmi = g_pDebugger->GetOrCreateMethodInfo(module, md); // throws
+ if (dmi == NULL)
+ {
+ return false;
+ }
+
+ EX_TRY
+ {
+ // OK, we either have (a) no code at all or (b) we have both JIT information and code.
+ // Either way, lay down the MasterPatch.
+ //
+ // MapAndBindFunctionPatches will take care of any instantiations that haven't
+ // finished JITting, by making a copy of the master breakpoint.
+ DebuggerControllerPatch *master = AddILMasterPatch(module, md, offset, encVersion);
+
+ // We have to keep the index here instead of the pointer. The loop below adds more patches,
+ // which may cause the patch table to grow and move.
+ ULONG masterIndex = g_patches->GetItemIndex((HASHENTRY*)master);
+
+ // Iterate through every existing NativeCodeBlob (with the same EnC version).
+ // This includes generics + prejitted code.
+ DebuggerMethodInfo::DJIIterator it;
+ dmi->IterateAllDJIs(pAppDomain, NULL /* module filter */, &it);
+
+ if (it.IsAtEnd())
+ {
+ // It is okay if we don't have any DJIs yet. It just means that the method hasn't been jitted.
+ fOk = TRUE;
+ }
+ else
+ {
+ // On the other hand, if the method has been jitted, then we expect to be able to bind at least
+ // one breakpoint. The exception is when we have multiple EnC versions of the method, in which
+ // case it is ok if we don't bind any breakpoint. One scenario is when a method has been updated
+ // via EnC but it's not yet jitted. We need to allow a debugger to put a breakpoint on the new
+ // version of the method, but the new version won't have a DJI yet.
+ BOOL fVersionMatch = FALSE;
+ while(!it.IsAtEnd())
+ {
+ DebuggerJitInfo *dji = it.Current();
+ _ASSERTE(dji->m_jitComplete);
+ if (dji->m_encVersion == encVersion)
+ {
+ fVersionMatch = TRUE;
+
+ master = (DebuggerControllerPatch *)g_patches->GetEntryPtr(masterIndex);
+
+ // <REVISIT_TODO> If we're missing JIT info for any then
+ // we won't have applied the bp to every instantiation. That should probably be reported
+ // as a new kind of condition to the debugger, i.e. report "bp only partially applied". It would be
+ // a shame to completely fail just because one instantiation is missing debug info: e.g. just because
+ // one component hasn't been prejitted with debugging information.</REVISIT_TODO>
+ fOk = (AddBindAndActivateILSlavePatch(master, dji) || fOk);
+ }
+ it.Next();
+ }
+
+ // This is the exceptional case referred to in the comment above. If we fail to put a breakpoint
+ // because we don't have a matching version of the method, we need to return TRUE.
+ if (fVersionMatch == FALSE)
+ {
+ fOk = TRUE;
+ }
+ }
+ }
+ EX_CATCH
+ {
+ fOk = FALSE;
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+ return fOk;
+}
+
+// Add a patch at native-offset 0 in the latest version of the method.
+// This is used by step-in.
+// Calls to new methods always go to the latest version, so EnC is not an issue here.
+// The method may be not yet jitted. Or it may be prejitted.
+void DebuggerController::AddPatchToStartOfLatestMethod(MethodDesc * fd)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS; // from GetJitInfo
+ GC_NOTRIGGER;
+ MODE_ANY; // don't really care what mode we're in.
+
+ PRECONDITION(ThisMaybeHelperThread());
+ PRECONDITION(CheckPointer(fd));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_patches != NULL);
+ DebuggerController::AddBindAndActivatePatchForMethodDesc(fd, NULL, 0, PATCH_KIND_NATIVE_MANAGED, LEAF_MOST_FRAME, NULL);
+ return;
+}
+
+
+// Place patch in method at native offset.
+BOOL DebuggerController::AddBindAndActivateNativeManagedPatch(MethodDesc * fd,
+ DebuggerJitInfo *dji,
+ SIZE_T offsetNative,
+ FramePointer fp,
+ AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS; // from GetJitInfo
+ GC_NOTRIGGER;
+ MODE_ANY; // don't really care what mode we're in.
+
+ PRECONDITION(ThisMaybeHelperThread());
+ PRECONDITION(CheckPointer(fd));
+ PRECONDITION(fd->IsDynamicMethod() || (dji != NULL));
+ }
+ CONTRACTL_END;
+
+ // For non-dynamic methods, we always expect to have a DJI, but just in case, we don't want the assert to AV.
+ _ASSERTE((dji == NULL) || (fd == dji->m_fd));
+ _ASSERTE(g_patches != NULL);
+ return DebuggerController::AddBindAndActivatePatchForMethodDesc(fd, dji, offsetNative, PATCH_KIND_NATIVE_MANAGED, fp, pAppDomain);
+}
+
+
+BOOL DebuggerController::AddBindAndActivatePatchForMethodDesc(MethodDesc *fd,
+ DebuggerJitInfo *dji,
+ SIZE_T offset,
+ DebuggerPatchKind kind,
+ FramePointer fp,
+ AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY; // don't really care what mode we're in.
+
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ BOOL ok = FALSE;
+ ControllerLockHolder ch;
+
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000,"DC::AP: Add to %s::%s, at offs 0x%x "
+ "fp:0x%x AD:0x%x\n", fd->m_pszDebugClassName,
+ fd->m_pszDebugMethodName,
+ offset, fp.GetSPValue(), pAppDomain));
+
+ DebuggerControllerPatch *patch = g_patches->AddPatchForMethodDef(
+ this,
+ g_pEEInterface->MethodDescGetModule(fd),
+ fd->GetMemberDef(),
+ offset,
+ kind,
+ fp,
+ pAppDomain,
+ NULL,
+ dji);
+
+ if (DebuggerController::BindPatch(patch, fd, NULL))
+ {
+ LOG((LF_CORDB|LF_ENC,LL_INFO1000,"BindPatch went fine, doing ActivatePatch\n"));
+ DebuggerController::ActivatePatch(patch);
+ ok = TRUE;
+ }
+
+ return ok;
+}
+
+
+// This version is particularly useful b/c it doesn't assume that the
+// patch is inside a managed method.
+DebuggerControllerPatch *DebuggerController::AddAndActivateNativePatchForAddress(CORDB_ADDRESS_TYPE *address,
+ FramePointer fp,
+ bool managed,
+ TraceType traceType)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+
+ PRECONDITION(g_patches != NULL);
+ }
+ CONTRACTL_END;
+
+
+ ControllerLockHolder ch;
+
+ DebuggerControllerPatch *patch
+ = g_patches->AddPatchForAddress(this,
+ NULL,
+ 0,
+ (managed? PATCH_KIND_NATIVE_MANAGED : PATCH_KIND_NATIVE_UNMANAGED),
+ address,
+ fp,
+ NULL,
+ NULL,
+ DebuggerPatchTable::DCP_PID_INVALID,
+ traceType);
+
+ ActivatePatch(patch);
+
+ return patch;
+}
+
+void DebuggerController::RemovePatchesFromModule(Module *pModule, AppDomain *pAppDomain )
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO100000, "DPT::CPFM mod:0x%p (%S)\n",
+ pModule, pModule->GetDebugName()));
+
+ // First find all patches of interest
+ DebuggerController::ControllerLockHolder ch;
+ HASHFIND f;
+ for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f);
+ patch != NULL;
+ patch = g_patches->GetNextPatch(&f))
+ {
+ // Skip patches not in the specified domain
+ if ((pAppDomain != NULL) && (patch->pAppDomain != pAppDomain))
+ continue;
+
+ BOOL fRemovePatch = FALSE;
+
+ // Remove both native and IL patches that belong to this module
+ if (patch->HasDJI())
+ {
+ DebuggerJitInfo * dji = patch->GetDJI();
+
+ _ASSERTE(patch->key.module == dji->m_fd->GetModule());
+
+ // It is not necessary to check for m_fd->GetModule() here. It will
+ // be covered by other module unload notifications issued for the appdomain.
+ if ( dji->m_pLoaderModule == pModule )
+ fRemovePatch = TRUE;
+ }
+ else
+ if (patch->key.module == pModule)
+ {
+ fRemovePatch = TRUE;
+ }
+
+ if (fRemovePatch)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "Removing patch 0x%p\n",
+ patch));
+ // we shouldn't be both hitting this patch AND
+ // unloading the module it belongs to.
+ _ASSERTE(!patch->IsTriggering());
+ Release( patch );
+ }
+ }
+}
+
+#ifdef _DEBUG
+bool DebuggerController::ModuleHasPatches( Module* pModule )
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if( g_patches == NULL )
+ {
+ // Patch table hasn't been initialized
+ return false;
+ }
+
+ // First find all patches of interest
+ HASHFIND f;
+ for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f);
+ patch != NULL;
+ patch = g_patches->GetNextPatch(&f))
+ {
+ //
+ // This mirrors logic in code:DebuggerController::RemovePatchesFromModule
+ //
+
+ if (patch->HasDJI())
+ {
+ DebuggerJitInfo * dji = patch->GetDJI();
+
+ _ASSERTE(patch->key.module == dji->m_fd->GetModule());
+
+ // It may be sufficient to just check m_pLoaderModule here. Since this is used for a debug-only
+ // check, we will check for m_fd->GetModule() as well to catch more potential problems.
+ if ( (dji->m_pLoaderModule == pModule) || (dji->m_fd->GetModule() == pModule) )
+ {
+ return true;
+ }
+ }
+
+ if (patch->key.module == pModule)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+#endif // _DEBUG
+
+//
+// Returns true if the given address is in an internal helper
+// function, false if it's not.
+//
+// This is a temporary workaround function to avoid having us stop in
+// unmanaged code belonging to the Runtime during a StepIn operation.
+//
+static bool _AddrIsJITHelper(PCODE addr)
+{
+#if !defined(_WIN64) && !defined(FEATURE_PAL)
+ // Is the address in the runtime dll (clr.dll or coreclr.dll) at all? (All helpers are in
+ // that dll)
+ if (g_runtimeLoadedBaseAddress <= addr &&
+ addr < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize)
+ {
+ for (int i = 0; i < CORINFO_HELP_COUNT; i++)
+ {
+ if (hlpFuncTable[i].pfnHelper == (void*)addr)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "_ANIM: address of helper function found: 0x%08x\n",
+ addr));
+ return true;
+ }
+ }
+
+ for (unsigned d = 0; d < DYNAMIC_CORINFO_HELP_COUNT; d++)
+ {
+ if (hlpDynamicFuncTable[d].pfnHelper == (void*)addr)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "_ANIM: address of helper function found: 0x%08x\n",
+ addr));
+ return true;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "_ANIM: address within runtime dll, but not a helper function "
+ "0x%08x\n", addr));
+ }
+#else // !defined(_WIN64) && !defined(FEATURE_PAL)
+ // TODO: Figure out what we want to do here
+#endif // !defined(_WIN64) && !defined(FEATURE_PAL)
+
+ return false;
+}
+
+// bool DebuggerController::PatchTrace() What: Invoke
+// AddPatch depending on the type of the given TraceDestination.
+// How: Invokes AddPatch based on the trace type: TRACE_OTHER will
+// return false, the others will obtain args for a call to an AddPatch
+// method & return true.
+//
+// Return true if we set a patch, else false
+bool DebuggerController::PatchTrace(TraceDestination *trace,
+ FramePointer fp,
+ bool fStopInUnmanaged)
+{
+ CONTRACTL
+ {
+ THROWS; // Because AddPatch may throw on oom. We may want to convert this to nothrow and return false.
+ MODE_ANY;
+ DISABLED(GC_TRIGGERS); // @todo - what should this be?
+
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+ DebuggerControllerPatch *dcp = NULL;
+
+ switch (trace->GetTraceType())
+ {
+ case TRACE_ENTRY_STUB: // fall through
+ case TRACE_UNMANAGED:
+ LOG((LF_CORDB, LL_INFO10000,
+ "DC::PT: Setting unmanaged trace patch at 0x%p(%p)\n",
+ trace->GetAddress(), fp.GetSPValue()));
+
+ if (fStopInUnmanaged && !_AddrIsJITHelper(trace->GetAddress()))
+ {
+ AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(),
+ fp,
+ FALSE,
+ trace->GetTraceType());
+ return true;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DC::PT: decided to NOT "
+ "place a patch in unmanaged code\n"));
+ return false;
+ }
+
+ case TRACE_MANAGED:
+ LOG((LF_CORDB, LL_INFO10000,
+ "Setting managed trace patch at 0x%p(%p)\n", trace->GetAddress(), fp.GetSPValue()));
+
+ MethodDesc *fd;
+ fd = g_pEEInterface->GetNativeCodeMethodDesc(trace->GetAddress());
+ _ASSERTE(fd);
+
+ DebuggerJitInfo *dji;
+ dji = g_pDebugger->GetJitInfoFromAddr(trace->GetAddress());
+ //_ASSERTE(dji); //we'd like to assert this, but attach won't work
+
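+ // Convert the trace address back into a native offset within the method and set a managed patch there.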
+ AddBindAndActivateNativeManagedPatch(fd,
+ dji,
+ CodeRegionInfo::GetCodeRegionInfo(dji, fd).AddressToOffset((const BYTE *)trace->GetAddress()),
+ fp,
+ NULL);
+ return true;
+
+ case TRACE_UNJITTED_METHOD:
+ // trace->address is actually a MethodDesc* of the method that we'll
+ // soon JIT, so put a relative bp at offset zero in.
+ LOG((LF_CORDB, LL_INFO10000,
+ "Setting unjitted method patch in MethodDesc 0x%p %s\n", trace->GetMethodDesc(), trace->GetMethodDesc() ? trace->GetMethodDesc()->m_pszDebugMethodName : ""));
+
+ // Note: we have to make sure to bind here. If this function is prejitted, this may be our only chance to get a
+ // DebuggerJITInfo and thereby cause a JITComplete callback.
+ AddPatchToStartOfLatestMethod(trace->GetMethodDesc());
+ return true;
+
+ case TRACE_FRAME_PUSH:
+ LOG((LF_CORDB, LL_INFO10000,
+ "Setting frame patch at 0x%p(%p)\n", trace->GetAddress(), fp.GetSPValue()));
+
+ AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(),
+ fp,
+ TRUE,
+ TRACE_FRAME_PUSH);
+ return true;
+
+ case TRACE_MGR_PUSH:
+ LOG((LF_CORDB, LL_INFO10000,
+ "Setting frame patch (TRACE_MGR_PUSH) at 0x%p(%p)\n",
+ trace->GetAddress(), fp.GetSPValue()));
+
+ dcp = AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(),
+ LEAF_MOST_FRAME, // But Mgr_push can't have fp affinity!
+ TRUE,
+ DPT_DEFAULT_TRACE_TYPE); // TRACE_OTHER
+ // Now copy over the trace field since TriggerPatch will expect this
+ // to be set for this case.
+ if (dcp != NULL)
+ {
+ dcp->trace = *trace;
+ }
+
+ return true;
+
+ case TRACE_OTHER:
+ LOG((LF_CORDB, LL_INFO10000,
+ "Can't set a trace patch for TRACE_OTHER...\n"));
+ return false;
+
+ default:
+ _ASSERTE(0);
+ return false;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Checks if the patch matches the context + thread.
+// Multiple patches can exist at a single address, so given a patch at the
+// Context's current address, this does additional patch-affinity checks like
+// thread, AppDomain, and frame-pointer.
+// thread - thread executing the given context that hit the patch
+// context - context of the thread that hit the patch
+// patch - candidate patch that we're looking for a match.
+// Returns:
+// True if the patch matches.
+// False otherwise.
+//-----------------------------------------------------------------------------
+bool DebuggerController::MatchPatch(Thread *thread,
+ CONTEXT *context,
+ DebuggerControllerPatch *patch)
+{
+ LOG((LF_CORDB, LL_INFO100000, "DC::MP: EIP:0x%p\n", GetIP(context)));
+
+ // Caller should have already matched our addresses.
+ if (patch->address != dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(context)))
+ {
+ return false;
+ }
+
+ // <BUGNUM>RAID 67173 -</BUGNUM> we'll make sure that intermediate patches have NULL
+ // pAppDomain so that we don't end up running to completion when
+ // the appdomain switches halfway through a step.
+ if (patch->pAppDomain != NULL)
+ {
+ AppDomain *pAppDomainCur = thread->GetDomain();
+
+ if (pAppDomainCur != patch->pAppDomain)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DC::MP: patches didn't match b/c of "
+ "appdomains!\n"));
+ return false;
+ }
+ }
+
+ if (patch->controller->m_thread != NULL && patch->controller->m_thread != thread)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DC::MP: patches didn't match b/c threads\n"));
+ return false;
+ }
+
+ if (patch->fp != LEAF_MOST_FRAME)
+ {
+ // If we specified a frame pointer, then it should have been safe to take a stack trace.
+
+ ControllerStackInfo info;
+ StackTraceTicket ticket(patch);
+ info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, context);
+
+ // !!! This check should really be != , but there is some ambiguity about which frame is the parent frame
+ // in the destination returned from Frame::TraceFrame, so this allows some slop there.
+
+ if (info.HasReturnFrame() && IsCloserToLeaf(info.m_returnFrame.fp, patch->fp))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Patch hit but frame not matched at %p (current=%p, patch=%p)\n",
+ patch->address, info.m_returnFrame.fp.GetSPValue(), patch->fp.GetSPValue()));
+
+ return false;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO100000, "DC::MP: Returning true"));
+
+ return true;
+}
+
+DebuggerPatchSkip *DebuggerController::ActivatePatchSkip(Thread *thread,
+ const BYTE *PC,
+ BOOL fForEnC)
+{
+#ifdef _DEBUG
+ BOOL shouldBreak = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ActivatePatchSkip);
+ if (shouldBreak > 0) {
+ _ASSERTE(!"ActivatePatchSkip");
+ }
+#endif
+
+ LOG((LF_CORDB,LL_INFO10000, "DC::APS thread=0x%p pc=0x%p fForEnc=%d\n",
+ thread, PC, fForEnC));
+ _ASSERTE(g_patches != NULL);
+
+ // Previously, we assumed that if we got to this point & the patch
+ // was still there that we'd have to skip the patch. SetIP changes
+ // this like so:
+ // A breakpoint is set, and hit (but not removed), and all the
+ // EE threads come to a screeching halt. The Debugger RC thread
+ // continues along, and is told to SetIP of the thread that hit
+ // the BP to whatever. Eventually the RC thread is told to continue,
+ // and at that point the EE thread is released, finishes DispatchPatchOrSingleStep,
+ // and shows up here.
+ // At that point, if the thread's current PC is
+ // different from the patch PC, then SetIP must have moved it elsewhere
+ // & we shouldn't do this patch skip (which will put us back to where
+ // we were, which is clearly wrong). If the PC _is_ the same, then
+ // the thread hasn't been moved, the patch is still in the code stream,
+ // and we want to do the patch skip thing in order to execute this
+ // instruction w/o removing it from the code stream.
+
+ DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)PC);
+ DebuggerPatchSkip *skip = NULL;
+
+ if (patch != NULL && patch->IsNativePatch())
+ {
+ //
+ // We adjust the thread's PC to someplace where we write
+ // the next instruction, then
+ // we single step over that, then we set the PC back here so
+ // we don't let other threads race past here while we're stepping
+ // this one.
+ //
+ // !!! check result
+ LOG((LF_CORDB,LL_INFO10000, "DC::APS: About to skip from PC=0x%p\n", PC));
+ skip = new (interopsafe) DebuggerPatchSkip(thread, patch, thread->GetDomain());
+ TRACE_ALLOC(skip);
+ }
+
+ return skip;
+}
+
+DPOSS_ACTION DebuggerController::ScanForTriggers(CORDB_ADDRESS_TYPE *address,
+ Thread *thread,
+ CONTEXT *context,
+ DebuggerControllerQueue *pDcq,
+ SCAN_TRIGGER stWhat,
+ TP_RESULT *pTpr)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ // @todo - should this throw or not?
+ NOTHROW;
+
+ // call Triggers which may invoke GC stuff... See comment in DispatchNativeException for why it's disabled.
+ DISABLED(GC_TRIGGERS);
+ PRECONDITION(!ThisIsHelperThreadWorker());
+
+ PRECONDITION(CheckPointer(address));
+ PRECONDITION(CheckPointer(thread));
+ PRECONDITION(CheckPointer(context));
+ PRECONDITION(CheckPointer(pDcq));
+ PRECONDITION(CheckPointer(pTpr));
+
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(HasLock());
+
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ LOG((LF_CORDB, LL_INFO10000, "DC::SFT: starting scan for addr:0x%x"
+ " thread:0x%x\n", address, thread));
+
+ _ASSERTE( pTpr != NULL );
+ DebuggerControllerPatch *patch = NULL;
+
+ if (g_patches != NULL)
+ patch = g_patches->GetPatch(address);
+
+ ULONG iEvent = UINT32_MAX;
+ ULONG iEventNext = UINT32_MAX;
+ BOOL fDone = FALSE;
+
+ // This is a debugger exception if there's a patch here, or
+ // we're here for something like a single step.
+ DPOSS_ACTION used = DPOSS_INVALID;
+ if ((patch != NULL) || !IsPatched(address, TRUE))
+ {
+ // we are sure that we care for this exception but not sure
+ // if we will send event to the RS
+ used = DPOSS_USED_WITH_NO_EVENT;
+ }
+ else
+ {
+ // initialize it to don't care for now
+ used = DPOSS_DONT_CARE;
+ }
+
+ TP_RESULT tpr = TPR_IGNORE;
+
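+ // Walk every patch bound to this address, give its controller a chance to trigger,
+ // and queue any controller that wants to send an event to the right side.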
+ while (stWhat & ST_PATCH &&
+ patch != NULL &&
+ !fDone)
+ {
+ _ASSERTE(IsInUsedAction(used) == true);
+
+ DebuggerControllerPatch *patchNext
+ = g_patches->GetNextPatch(patch);
+
+ LOG((LF_CORDB, LL_INFO10000, "DC::SFT: patch 0x%x, patchNext 0x%x\n", patch, patchNext));
+
+ // Annoyingly, TriggerPatch may add patches, which may cause
+ // the patch table to move, which may, in turn, invalidate
+ // the patch (and patchNext) pointers. Store indices, instead.
+ iEvent = g_patches->GetItemIndex( (HASHENTRY *)patch );
+
+ if (patchNext != NULL)
+ {
+ iEventNext = g_patches->GetItemIndex((HASHENTRY *)patchNext);
+ }
+
+ if (MatchPatch(thread, context, patch))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DC::SFT: patch matched\n"));
+ AddRef(patch);
+
+ // We are hitting a patch at a virtual trace call target, so let's trigger trace call here.
+ if (patch->trace.GetTraceType() == TRACE_ENTRY_STUB)
+ {
+ patch->controller->TriggerTraceCall(thread, dac_cast<PTR_CBYTE>(::GetIP(context)));
+ tpr = TPR_IGNORE;
+ }
+ else
+ {
+ // Mark if we're at an unsafe place.
+ AtSafePlaceHolder unsafePlaceHolder(thread);
+
+ tpr = patch->controller->TriggerPatch(patch,
+ thread,
+ TY_NORMAL);
+ }
+
+ // Any patch may potentially send an event.
+ // (Whereas some single-steps are "internal-only" and can
+ // never send an event- such as a single step over an exception that
+ // lands us in la-la land.)
+ used = DPOSS_USED_WITH_EVENT;
+
+ if (tpr == TPR_TRIGGER ||
+ tpr == TPR_TRIGGER_ONLY_THIS ||
+ tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP)
+ {
+ // Make sure we've still got a valid pointer.
+ patch = (DebuggerControllerPatch *)
+ DebuggerController::g_patches->GetEntryPtr( iEvent );
+
+ pDcq->dcqEnqueue(patch->controller, TRUE); // <REVISIT_TODO>@todo Return value</REVISIT_TODO>
+ }
+
+ // Make sure we've got a valid pointer in case TriggerPatch
+ // returned false but still caused the table to move.
+ patch = (DebuggerControllerPatch *)
+ g_patches->GetEntryPtr( iEvent );
+
+ // A patch can be deleted as a result of its being triggered.
+ // The actual deletion of the patch is delayed until after
+ // the end of the trigger.
+ // Moreover, "patchNext" could have been deleted as a result of DisableAll()
+ // being called in TriggerPatch(). Thus, we should update our patchNext
+ // pointer now. We were just lucky before, because the now-deprecated
+ // "deleted" flag didn't get set when we iterate the patches in DisableAll().
+ patchNext = g_patches->GetNextPatch(patch);
+ if (patchNext != NULL)
+ iEventNext = g_patches->GetItemIndex((HASHENTRY *)patchNext);
+
+ // Note that Release() actually removes the patch if its ref count
+ // reaches 0 after the release.
+ Release(patch);
+ }
+
+ if (tpr == TPR_IGNORE_AND_STOP ||
+ tpr == TPR_TRIGGER_ONLY_THIS ||
+ tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP)
+ {
+#ifdef _DEBUG
+ if (tpr == TPR_TRIGGER_ONLY_THIS ||
+ tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP)
+ _ASSERTE(pDcq->dcqGetCount() == 1);
+#endif //_DEBUG
+
+ fDone = TRUE;
+ }
+ else if (patchNext != NULL)
+ {
+ patch = (DebuggerControllerPatch *)
+ g_patches->GetEntryPtr(iEventNext);
+ }
+ else
+ {
+ patch = NULL;
+ }
+ }
+
+ if (stWhat & ST_SINGLE_STEP &&
+ tpr != TPR_TRIGGER_ONLY_THIS)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DC::SFT: Trigger controllers with single step\n"));
+
+ //
+ // Now, go ahead & trigger all controllers with
+ // single step events
+ //
+
+ DebuggerController *p;
+
+ p = g_controllers;
+ while (p != NULL)
+ {
+ DebuggerController *pNext = p->m_next;
+
+ if (p->m_thread == thread && p->m_singleStep)
+ {
+ if (used == DPOSS_DONT_CARE)
+ {
+ // Debugger does care for this exception.
+ used = DPOSS_USED_WITH_NO_EVENT;
+ }
+
+ if (p->TriggerSingleStep(thread, (const BYTE *)address))
+ {
+ // by now, we should already know that we care for this exception.
+ _ASSERTE(IsInUsedAction(used) == true);
+
+ // now we are sure that we will send event to the RS
+ used = DPOSS_USED_WITH_EVENT;
+ pDcq->dcqEnqueue(p, FALSE); // <REVISIT_TODO>@todo Return value</REVISIT_TODO>
+
+ }
+ }
+
+ p = pNext;
+ }
+
+ UnapplyTraceFlag(thread);
+
+ //
+ // See if we have any steppers still active for this thread, if so
+ // re-apply the trace flag.
+ //
+
+ p = g_controllers;
+ while (p != NULL)
+ {
+ if (p->m_thread == thread && p->m_singleStep)
+ {
+ ApplyTraceFlag(thread);
+ break;
+ }
+
+ p = p->m_next;
+ }
+ }
+
+ // Significant speed increase from single dereference, I bet :)
+ (*pTpr) = tpr;
+
+ LOG((LF_CORDB, LL_INFO10000, "DC::SFT returning 0x%x as used\n",used));
+ return used;
+}
+
+#ifdef EnC_SUPPORTED
+DebuggerControllerPatch *DebuggerController::IsXXXPatched(const BYTE *PC,
+ DEBUGGER_CONTROLLER_TYPE dct)
+{
+ _ASSERTE(g_patches != NULL);
+
+ DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)PC);
+
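+ // Walk the chain of patches at this address looking for a native patch owned by a controller of the requested type.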
+ while(patch != NULL &&
+ (int)patch->controller->GetDCType() <= (int)dct)
+ {
+ if (patch->IsNativePatch() &&
+ patch->controller->GetDCType()==dct)
+ {
+ return patch;
+ }
+ patch = g_patches->GetNextPatch(patch);
+ }
+
+ return NULL;
+}
+
+// This function will check for an EnC patch at the given address and return
+// it if one is there, otherwise it will return NULL.
+DebuggerControllerPatch *DebuggerController::GetEnCPatch(const BYTE *address)
+{
+ _ASSERTE(address);
+
+ if( g_pEEInterface->IsManagedNativeCode(address) )
+ {
+ DebuggerJitInfo *dji = g_pDebugger->GetJitInfoFromAddr((TADDR) address);
+ if (dji == NULL)
+ return NULL;
+
+ // we can have two types of patches - one in code where the IL has been updated to trigger
+ // the switch and the other in the code we've switched to in order to trigger FunctionRemapComplete
+ // callback. If version == default then it can't be the latter; otherwise, if we haven't handled the
+ // remap for this function yet, it is certainly the latter.
+ if (! dji->m_encBreakpointsApplied &&
+ (dji->m_encVersion == CorDB_DEFAULT_ENC_FUNCTION_VERSION))
+ {
+ return NULL;
+ }
+ }
+ return IsXXXPatched(address, DEBUGGER_CONTROLLER_ENC);
+}
+#endif //EnC_SUPPORTED
+
+// DebuggerController::DispatchPatchOrSingleStep - Ask any patches that are active at a given
+// address if they want to do anything about the exception that's occurred there. How: For the given
+// address, go through the list of patches & see if any of them are interested (by invoking their
+// DebuggerController's TriggerPatch). Put any DCs that are interested into a queue and then calls
+// SendEvent on each.
+// Note that control will not return from this function in the case of EnC remap
+DPOSS_ACTION DebuggerController::DispatchPatchOrSingleStep(Thread *thread, CONTEXT *context, CORDB_ADDRESS_TYPE *address, SCAN_TRIGGER which)
+{
+ CONTRACT(DPOSS_ACTION)
+ {
+ // @todo - should this throw or not?
+ NOTHROW;
+ DISABLED(GC_TRIGGERS); // Only GC triggers if we send an event. See Comment in DispatchNativeException
+ PRECONDITION(!ThisIsHelperThreadWorker());
+
+ PRECONDITION(CheckPointer(thread));
+ PRECONDITION(CheckPointer(context));
+ PRECONDITION(CheckPointer(address));
+ PRECONDITION(!HasLock());
+
+ POSTCONDITION(!HasLock()); // make sure we're not leaking the controller lock
+ }
+ CONTRACT_END;
+
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ LOG((LF_CORDB|LF_ENC,LL_INFO1000,"DC:DPOSS at 0x%x trigger:0x%x\n", address, which));
+
+ // We should only have an exception if some managed thread was running.
+ // Thus we should never be here when we're stopped.
+ // @todo - this assert fires! Is that an issue, or is it invalid?
+ //_ASSERTE(!g_pDebugger->IsStopped());
+ DPOSS_ACTION used = DPOSS_DONT_CARE;
+
+ DebuggerControllerQueue dcq;
+ if (!g_patchTableValid)
+ {
+
+ LOG((LF_CORDB|LF_ENC, LL_INFO1000, "DC::DPOSS returning, no patch table.\n"));
+ RETURN (used);
+ }
+ _ASSERTE(g_patches != NULL);
+
+ CrstHolderWithState lockController(&g_criticalSection);
+
+#ifdef EnC_SUPPORTED
+ DebuggerControllerPatch *dcpEnCOriginal = NULL;
+
+ // If this sequence point has an EnC patch, we want to process it ahead of any others. If the
+ // debugger wants to remap the function at this point, then we'll call ResumeInUpdatedFunction and
+ // not return, otherwise we will just continue with regular patch-handling logic
+ dcpEnCOriginal = GetEnCPatch(dac_cast<PTR_CBYTE>(GetIP(context)));
+
+ if (dcpEnCOriginal)
+ {
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS EnC short-circuit\n"));
+ TP_RESULT tpres =
+ dcpEnCOriginal->controller->TriggerPatch(dcpEnCOriginal,
+ thread,
+ TY_SHORT_CIRCUIT);
+
+ // We will only come back here on a RemapOpportunity that wasn't taken, or on a RemapComplete.
+ // If we processed a RemapComplete (which returns TPR_IGNORE_AND_STOP), then we don't want to handle
+ // additional breakpoints on the current line because we've already effectively executed to that point
+ // and would have hit them already. If they are new, we also don't want to hit them because, e.g., if we are
+ // sitting on line 10 and add a breakpoint at line 10 and step,
+ // we don't expect to stop at line 10, we expect to go to line 11.
+ //
+ // Special case is if an EnC remap breakpoint exists in the function. This could only happen if the function was
+ // updated between the RemapOpportunity and the RemapComplete. In that case we want to not skip the patches
+ // and fall through to handle the remap breakpoint.
+
+ if (tpres == TPR_IGNORE_AND_STOP)
+ {
+ // It was a RemapComplete, so fall through. Set dcpEnCOriginal to NULL to indicate that any
+ // EnC patch still there should be treated as a new patch. Any RemapComplete patch will have been
+ // already removed by patch processing.
+ dcpEnCOriginal = NULL;
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS done EnC short-circuit, exiting\n"));
+ used = DPOSS_USED_WITH_EVENT; // indicate that we handled a patch
+ goto Exit;
+ }
+
+ _ASSERTE(tpres==TPR_IGNORE);
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS done EnC short-circuit, ignoring\n"));
+ // if we got here, then the EnC remap opportunity was not taken, so just continue on.
+ }
+#endif // EnC_SUPPORTED
+
+ TP_RESULT tpr;
+
+ used = ScanForTriggers((CORDB_ADDRESS_TYPE *)address, thread, context, &dcq, which, &tpr);
+
+ LOG((LF_CORDB|LF_ENC, LL_EVERYTHING, "DC::DPOSS ScanForTriggers called and returned.\n"));
+
+
+ // If we setip, then that will change the address in the context.
+ // Remember the old address so that we can compare it to the context's ip and see if it changed.
+ // If it did change, then don't dispatch our current event.
+ TADDR originalAddress = (TADDR) address;
+
+#ifdef _DEBUG
+ // If we do a SetIP after this point, the value of address will be garbage. Set it to a distinctive pattern now, so
+ // we don't accidentally use what will (98% of the time) appear to be a valid value.
+ address = (CORDB_ADDRESS_TYPE *)(UINT_PTR)0xAABBCCFF;
+#endif //_DEBUG
+
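+ // If any controllers queued themselves, release the controller lock and send their events to the right side.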
+ if (dcq.dcqGetCount()> 0)
+ {
+ lockController.Release();
+
+ // Mark if we're at an unsafe place.
+ bool atSafePlace = g_pDebugger->IsThreadAtSafePlace(thread);
+ if (!atSafePlace)
+ g_pDebugger->IncThreadsAtUnsafePlaces();
+
+ DWORD dwEvent = 0xFFFFFFFF;
+ DWORD dwNumberEvents = 0;
+ BOOL reabort = FALSE;
+
+ SENDIPCEVENT_BEGIN(g_pDebugger, thread);
+
+ // Now that we've resumed from blocking, check if somebody did a SetIp on us.
+ bool fIpChanged = (originalAddress != GetIP(context));
+
+ // Send the events outside of the controller lock
+ bool anyEventsSent = false;
+
+ dwNumberEvents = dcq.dcqGetCount();
+ dwEvent = 0;
+
+ while (dwEvent < dwNumberEvents)
+ {
+ DebuggerController *event = dcq.dcqGetElement(dwEvent);
+
+ if (!event->m_deleted)
+ {
+#ifdef DEBUGGING_SUPPORTED
+ if (thread->GetDomain()->IsDebuggerAttached())
+ {
+ if (event->SendEvent(thread, fIpChanged))
+ {
+ anyEventsSent = true;
+ }
+ }
+#endif //DEBUGGING_SUPPORTED
+ }
+
+ dwEvent++;
+ }
+
+ // Trap all threads if necessary, but only if we actually sent an event up (i.e., all the queued events weren't
+ // deleted before we got a chance to get the EventSending lock).
+ if (anyEventsSent)
+ {
+ LOG((LF_CORDB|LF_ENC, LL_EVERYTHING, "DC::DPOSS We sent an event\n"));
+ g_pDebugger->SyncAllThreads(SENDIPCEVENT_PtrDbgLockHolder);
+ LOG((LF_CORDB,LL_INFO1000, "SAT called!\n"));
+ }
+
+
+ // If we need to do a re-abort (see below), then save the current IP in the thread's context before we block and
+ // possibly let another func eval get setup.
+ reabort = thread->m_StateNC & Thread::TSNC_DebuggerReAbort;
+ SENDIPCEVENT_END;
+
+ if (!atSafePlace)
+ g_pDebugger->DecThreadsAtUnsafePlaces();
+
+ lockController.Acquire();
+
+ // Dequeue the events while we have the controller lock.
+ dwEvent = 0;
+ while (dwEvent < dwNumberEvents)
+ {
+ dcq.dcqDequeue();
+ dwEvent++;
+ }
+ // If a func eval completed with a ThreadAbortException, go ahead and setup the thread to re-abort itself now
+ // that we're continuing the thread. Note: we make sure that the thread's IP hasn't changed between now and when
+ // we blocked above. While blocked above, the debugger has a chance to setup another func eval on this
+ // thread. If that happens, we don't want to setup the reabort just yet.
+ if (reabort)
+ {
+ if ((UINT_PTR)GetEEFuncEntryPoint(::FuncEvalHijack) != (UINT_PTR)GetIP(context))
+ {
+ HRESULT hr;
+ hr = g_pDebugger->FuncEvalSetupReAbort(thread, Thread::TAR_Thread);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+ }
+ }
+
+#if defined EnC_SUPPORTED
+Exit:
+#endif
+
+ // Note: if the thread filter context is NULL, then SetIP would have failed & thus we should do the
+ // patch skip thing.
+ // @todo - do we need to get the context again here?
+ CONTEXT *pCtx = GetManagedLiveCtx(thread);
+
+#ifdef EnC_SUPPORTED
+ DebuggerControllerPatch *dcpEnCCurrent = GetEnCPatch(dac_cast<PTR_CBYTE>((GetIP(context))));
+
+ // we have a new patch if the original was null and the current is non-null. Otherwise we have an old
+ // patch. We want to skip old patches, but handle new patches.
+ if (dcpEnCOriginal == NULL && dcpEnCCurrent != NULL)
+ {
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS EnC post-processing\n"));
+ dcpEnCCurrent->controller->TriggerPatch( dcpEnCCurrent,
+ thread,
+ TY_SHORT_CIRCUIT);
+ used = DPOSS_USED_WITH_EVENT; // indicate that we handled a patch
+ }
+#endif
+
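+ // If a native patch is still present at the thread's current IP, set up a patch skip so the
+ // original instruction can be executed.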
+ ActivatePatchSkip(thread, dac_cast<PTR_CBYTE>(GetIP(pCtx)), FALSE);
+
+ lockController.Release();
+
+
+ // We pulse the GC mode here to cooperate w/ a thread trying to suspend the runtime. If we didn't pulse
+ // the GC, the odds of catching this thread in interruptible code may be very small (since this filter
+ // could be very large compared to the managed code this thread is running).
+ // Only do this if the exception was actually for the debugger. (We don't want to toggle the GC mode on every
+ // random exception). We can't do this while holding any debugger locks.
+ if (used == DPOSS_USED_WITH_EVENT)
+ {
+ bool atSafePlace = g_pDebugger->IsThreadAtSafePlace(thread);
+ if (!atSafePlace)
+ {
+ g_pDebugger->IncThreadsAtUnsafePlaces();
+ }
+
+ // Always pulse the GC mode. This will allow an async break to complete even if we have a patch
+ // at an unsafe place.
+ // If we are at an unsafe place, then we can't do a GC.
+ thread->PulseGCMode();
+
+ if (!atSafePlace)
+ {
+ g_pDebugger->DecThreadsAtUnsafePlaces();
+ }
+
+ }
+
+ RETURN used;
+}
+
+bool DebuggerController::IsSingleStepEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_singleStep;
+}
+
+void DebuggerController::EnableSingleStep()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ // Some controllers don't need to set the SS to do their job, and if they are setting it, it's likely an issue.
+ // So we assert here to catch them red-handed. This assert can always be updated to accommodate changes
+ // in a controller's behavior.
+
+ switch(GetDCType())
+ {
+ case DEBUGGER_CONTROLLER_THREAD_STARTER:
+ case DEBUGGER_CONTROLLER_BREAKPOINT:
+ case DEBUGGER_CONTROLLER_USER_BREAKPOINT:
+ case DEBUGGER_CONTROLLER_FUNC_EVAL_COMPLETE:
+ CONSISTENCY_CHECK_MSGF(false, ("Controller pThis=%p shouldn't be setting ss flag.", this));
+ break;
+ default: // MingW compilers require all enum cases to be handled in switch statement.
+ break;
+ }
+#endif
+
+ EnableSingleStep(m_thread);
+ m_singleStep = true;
+}
+
+#ifdef EnC_SUPPORTED
+// Note that this doesn't tell us if Single Stepping is currently enabled
+// at the hardware level (i.e., for x86, if (context->EFlags & 0x100)), but
+// rather, if we WANT single stepping enabled (pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping).
+// This gets called from exactly one place - ActivatePatchSkipForEnC
+BOOL DebuggerController::IsSingleStepEnabled(Thread *pThread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // This should be an atomic operation, so we
+ // don't need to lock it.
+ if(pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping)
+ {
+ _ASSERTE(pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping);
+
+ return TRUE;
+ }
+ else
+ return FALSE;
+}
+#endif //EnC_SUPPORTED
+
+void DebuggerController::EnableSingleStep(Thread *pThread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO1000, "DC::EnableSingleStep\n"));
+
+ _ASSERTE(pThread != NULL);
+
+ ControllerLockHolder lockController;
+
+ ApplyTraceFlag(pThread);
+}
+
+// Disable Single stepping for this controller.
+// If none of the controllers on this thread want single-stepping, then also
+// ensure that it's disabled on the hardware level.
+void DebuggerController::DisableSingleStep()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_thread != NULL);
+
+ LOG((LF_CORDB,LL_INFO1000, "DC::DisableSingleStep\n"));
+
+ ControllerLockHolder lockController;
+ {
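+        // Scan all controllers: if any other controller on this thread still wants
+        // single-stepping, leave the hardware trace flag set; otherwise remove it below.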
+ DebuggerController *p = g_controllers;
+
+ m_singleStep = false;
+
+ while (p != NULL)
+ {
+ if (p->m_thread == m_thread
+ && p->m_singleStep)
+ break;
+
+ p = p->m_next;
+ }
+
+ if (p == NULL)
+ {
+ UnapplyTraceFlag(m_thread);
+ }
+ }
+}
+
+
+//
+// ApplyTraceFlag sets the trace flag (i.e., turns on single-stepping)
+// for a thread.
+//
+void DebuggerController::ApplyTraceFlag(Thread *thread)
+{
+ LOG((LF_CORDB,LL_INFO1000, "DC::ApplyTraceFlag thread:0x%x [0x%0x]\n", thread, Debugger::GetThreadIdHelper(thread)));
+
+ CONTEXT *context;
+ if(thread->GetInteropDebuggingHijacked())
+ {
+ context = GetManagedLiveCtx(thread);
+ }
+ else
+ {
+ context = GetManagedStoppedCtx(thread);
+ }
+ CONSISTENCY_CHECK_MSGF(context != NULL, ("Can't apply ss flag to thread 0x%p b/c it's not in a safe place.\n", thread));
+ PREFIX_ASSUME(context != NULL);
+
+
+ g_pEEInterface->MarkThreadForDebugStepping(thread, true);
+ LOG((LF_CORDB,LL_INFO1000, "DC::ApplyTraceFlag marked thread for debug stepping\n"));
+
+ SetSSFlag(reinterpret_cast<DT_CONTEXT *>(context) ARM_ARG(thread));
+ LOG((LF_CORDB,LL_INFO1000, "DC::ApplyTraceFlag Leaving, baby!\n"));
+}
+
+//
+// UnapplyTraceFlag clears the trace flag (i.e., turns off single-stepping)
+// for a thread at the hardware level.
+//
+
+void DebuggerController::UnapplyTraceFlag(Thread *thread)
+{
+ LOG((LF_CORDB,LL_INFO1000, "DC::UnapplyTraceFlag thread:0x%x\n", thread));
+
+
+ // Either this is the helper thread, or we're manipulating our own context.
+ _ASSERTE(
+ ThisIsHelperThreadWorker() ||
+ (thread == ::GetThread())
+ );
+
+ CONTEXT *context = GetManagedStoppedCtx(thread);
+
+ // If there's no context available, then the thread shouldn't have the single-step flag
+ // enabled and there's nothing for us to do.
+ if (context == NULL)
+ {
+ // In theory, I wouldn't expect us to ever get here.
+ // Even if we are here, our single-step flag should already be deactivated,
+ // so there should be nothing to do. However, we still assert b/c we want to know how
+ // we'd actually hit this.
+ // @todo - is there a path if TriggerUnwind() calls DisableAll(). But why would
+        CONSISTENCY_CHECK_MSGF(false, ("How did we get here? thread=%p\n", thread));
+ LOG((LF_CORDB,LL_INFO1000, "DC::UnapplyTraceFlag couldn't get context.\n"));
+ return;
+ }
+
+ // Always need to unmark for stepping
+ g_pEEInterface->MarkThreadForDebugStepping(thread, false);
+ UnsetSSFlag(reinterpret_cast<DT_CONTEXT *>(context) ARM_ARG(thread));
+}
+
+void DebuggerController::EnableExceptionHook()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_thread != NULL);
+
+ ControllerLockHolder lockController;
+
+ m_exceptionHook = true;
+}
+
+void DebuggerController::DisableExceptionHook()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(m_thread != NULL);
+
+ ControllerLockHolder lockController;
+ m_exceptionHook = false;
+}
+
+
+// void DebuggerController::DispatchExceptionHook() Called before
+// the switch statement in DispatchNativeException (therefore
+// when any exception occurs), this allows patches to do something before the
+// regular DispatchX methods.
+// How: Iterate through list of controllers. If m_exceptionHook
+// is set & m_thread is either thread or NULL, then invoke TriggerExceptionHook()
+BOOL DebuggerController::DispatchExceptionHook(Thread *thread,
+ CONTEXT *context,
+ EXCEPTION_RECORD *pException)
+{
+ // ExceptionHook has restrictive contract b/c it could come from anywhere.
+ // This can only modify controller's internal state. Can't send managed debug events.
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ GC_NOTRIGGER;
+ NOTHROW;
+ MODE_ANY;
+
+ // Filter context not set yet b/c we can only set it in COOP, and this may be in preemptive.
+ PRECONDITION(thread == ::GetThread());
+ PRECONDITION((g_pEEInterface->GetThreadFilterContext(thread) == NULL));
+ PRECONDITION(CheckPointer(pException));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "DC:: DispatchExceptionHook\n"));
+
+ if (!g_patchTableValid)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DC::DEH returning, no patch table.\n"));
+ return (TRUE);
+ }
+
+
+ _ASSERTE(g_patches != NULL);
+
+ ControllerLockHolder lockController;
+
+ TP_RESULT tpr = TPR_IGNORE;
+ DebuggerController *p;
+
+ p = g_controllers;
+ while (p != NULL)
+ {
+ DebuggerController *pNext = p->m_next;
+
+ if (p->m_exceptionHook
+ && (p->m_thread == NULL || p->m_thread == thread) &&
+ tpr != TPR_IGNORE_AND_STOP)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DC::DEH calling TEH...\n"));
+ tpr = p->TriggerExceptionHook(thread, context , pException);
+ LOG((LF_CORDB, LL_INFO1000, "DC::DEH ... returned.\n"));
+
+ if (tpr == TPR_IGNORE_AND_STOP)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DC:: DEH: leaving early!\n"));
+ break;
+ }
+ }
+
+ p = pNext;
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "DC:: DEH: returning 0x%x!\n", tpr));
+
+ return (tpr != TPR_IGNORE_AND_STOP);
+}
+
+//
+// EnableUnwind enables an unwind event to be called when the stack is unwound
+// (via an exception) to or past the given pointer.
+//
+
+void DebuggerController::EnableUnwind(FramePointer fp)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ASSERT(m_thread != NULL);
+ LOG((LF_CORDB,LL_EVERYTHING,"DC:EU EnableUnwind at 0x%x\n", fp.GetSPValue()));
+
+ ControllerLockHolder lockController;
+ m_unwindFP = fp;
+}
+
+FramePointer DebuggerController::GetUnwind()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_unwindFP;
+}
+
+//
+// DisableUnwind disables the unwind event for the controller.
+//
+
+void DebuggerController::DisableUnwind()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ ASSERT(m_thread != NULL);
+
+ LOG((LF_CORDB,LL_INFO1000, "DC::DU\n"));
+
+ ControllerLockHolder lockController;
+
+ m_unwindFP = LEAF_MOST_FRAME;
+}
+
+//
+// DispatchUnwind is called when an unwind happens. It dispatches
+// the event to the appropriate controllers.
+// - handlerFP is the frame pointer that the handler will be invoked at.
+// - pDJI is the EnC-aware DebuggerJitInfo for the method that the handler is in.
+// - newOffset is the native offset at which the handler/filter will begin executing.
+//
+bool DebuggerController::DispatchUnwind(Thread *thread,
+ MethodDesc *fd, DebuggerJitInfo * pDJI,
+ SIZE_T newOffset,
+ FramePointer handlerFP,
+ CorDebugStepReason unwindReason)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER; // don't send IPC events
+ MODE_COOPERATIVE; // TriggerUnwind always is coop
+
+ PRECONDITION(!IsDbgHelperSpecialThread());
+ }
+ CONTRACTL_END;
+
+
+ CONTRACT_VIOLATION(ThrowsViolation); // trigger unwind throws
+
+ _ASSERTE(unwindReason == STEP_EXCEPTION_FILTER || unwindReason == STEP_EXCEPTION_HANDLER);
+
+ bool used = false;
+
+ LOG((LF_CORDB, LL_INFO10000, "DC: Dispatch Unwind\n"));
+
+ ControllerLockHolder lockController;
+ {
+ DebuggerController *p;
+
+ p = g_controllers;
+
+ while (p != NULL)
+ {
+ DebuggerController *pNext = p->m_next;
+
+ if (p->m_thread == thread && p->m_unwindFP != LEAF_MOST_FRAME)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Dispatch Unwind: Found candidate\n"));
+
+
+ // Assumptions here:
+                    // Functions with handlers are -ALWAYS- EBP-frame based (JIT assumption)
+ //
+ // newFrame is the EBP for the handler
+ // p->m_unwindFP points to the stack slot with the return address of the function.
+ //
+                    // For the interesting case, stepover, we want to know if the handler is in the same function
+                    // as the stepper, above it (caller), or under it (callee), in order to know whether we want
+                    // to patch the handler or not.
+ //
+ // 3 cases:
+ //
+                    // a) Handler is in a function under the function where the step happened. It therefore is
+                    //    a stepover. We don't want to patch this handler. The handler will have an EBP frame,
+                    //    so it will be at least 2 DWORDs away from the m_unwindFP of the controller
+                    //    (1 DWORD for the pushed return address and 1 DWORD for the pushed EBP).
+ //
+ // b) Handler is in the same function as the stepper. We want to patch the handler. In this
+ // case handlerFP will be the same as p->m_unwindFP-sizeof(void*). Why? p->m_unwindFP
+ // stores a pointer to the return address of the function. As a function with a handler
+ // is always EBP frame based it will have the following code in the prolog:
+ //
+ // push ebp <- ( sub esp, 4 ; mov [esp], ebp )
+                    //              mov ebp, esp
+ //
+ // Therefore EBP will be equal to &CallerReturnAddress-4.
+ //
+ // c) Handler is above the function where the stepper is. We want to patch the handler. handlerFP
+ // will be always greater than the pointer to the return address of the function where the
+ // stepper is.
+ //
+ //
+ //
+
+ if (IsEqualOrCloserToRoot(handlerFP, p->m_unwindFP))
+ {
+ used = true;
+
+ //
+ // Assume that this isn't going to block us at all --
+ // other threads may be waiting to patch or unpatch something,
+ // or to dispatch.
+ //
+ LOG((LF_CORDB, LL_INFO10000,
+ "Unwind trigger at offset 0x%p; handlerFP: 0x%p unwindReason: 0x%x.\n",
+ newOffset, handlerFP.GetSPValue(), unwindReason));
+
+ p->TriggerUnwind(thread,
+ fd, pDJI,
+ newOffset,
+ handlerFP,
+ unwindReason);
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "Unwind trigger at offset 0x%p; handlerFP: 0x%p unwindReason: 0x%x.\n",
+ newOffset, handlerFP.GetSPValue(), unwindReason));
+ }
+ }
+
+ p = pNext;
+ }
+ }
+
+ return used;
+}
+
+//
+// EnableTraceCall enables a call event on the controller
+// maxFrame is the leaf-most frame that we want notifications for.
+// For step-in stuff, this will always be LEAF_MOST_FRAME.
+// for step-out, this will be the current frame because we don't
+// care if the current frame calls back into managed code when we're
+// only interested in our parent frames.
+//
+
+void DebuggerController::EnableTraceCall(FramePointer maxFrame)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ASSERT(m_thread != NULL);
+
+ LOG((LF_CORDB,LL_INFO1000, "DC::ETC maxFrame=0x%x, thread=0x%x\n",
+ maxFrame.GetSPValue(), Debugger::GetThreadIdHelper(m_thread)));
+
+    // A JMC stepper should never enable this. (It should enable MethodEnter instead.)
+ _ASSERTE((DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType()) || !"JMC stepper shouldn't enable trace-call");
+
+
+ ControllerLockHolder lockController;
+ {
+ if (!m_traceCall)
+ {
+ m_traceCall = true;
+ g_pEEInterface->EnableTraceCall(m_thread);
+ }
+
+ if (IsCloserToLeaf(maxFrame, m_traceCallFP))
+ m_traceCallFP = maxFrame;
+ }
+}
+
+struct PatchTargetVisitorData
+{
+ DebuggerController* controller;
+ FramePointer maxFrame;
+};
+
+VOID DebuggerController::PatchTargetVisitor(TADDR pVirtualTraceCallTarget, VOID* pUserData)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DebuggerController* controller = ((PatchTargetVisitorData*) pUserData)->controller;
+ FramePointer maxFrame = ((PatchTargetVisitorData*) pUserData)->maxFrame;
+
+ EX_TRY
+ {
+ CONTRACT_VIOLATION(GCViolation); // PatchTrace throws, which implies GC-triggers
+ TraceDestination trace;
+ trace.InitForUnmanagedStub(pVirtualTraceCallTarget);
+ controller->PatchTrace(&trace, maxFrame, true);
+ }
+ EX_CATCH
+ {
+ // not much we can do here
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+//
+// DisableTraceCall disables call events on the controller
+//
+
+void DebuggerController::DisableTraceCall()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ASSERT(m_thread != NULL);
+
+ ControllerLockHolder lockController;
+ {
+ if (m_traceCall)
+ {
+ LOG((LF_CORDB,LL_INFO1000, "DC::DTC thread=0x%x\n",
+ Debugger::GetThreadIdHelper(m_thread)));
+
+ g_pEEInterface->DisableTraceCall(m_thread);
+
+ m_traceCall = false;
+ m_traceCallFP = ROOT_MOST_FRAME;
+ }
+ }
+}
+
+// Get a FramePointer for the leafmost frame on this thread's stacktrace.
+// It's tempting to create this off the head of the Frame chain, but that may
+// include internal EE Frames (like GCRoot frames) which a FrameInfo-stackwalk may skip over.
+// Thus using the Frame chain would err on the side of returning a FramePointer that is
+// closer to the leaf than a FrameInfo-stackwalk would return.
+FramePointer GetCurrentFramePointerFromStackTraceForTraceCall(Thread * thread)
+{
+ _ASSERTE(thread != NULL);
+
+ // Ensure this is really the same as CSI.
+ ControllerStackInfo info;
+
+ // It's possible this stackwalk may be done at an unsafe time.
+ // this method may trigger a GC, for example, in
+ // FramedMethodFrame::AskStubForUnmanagedCallSite
+ // which will trash the incoming argument array
+ // which is not gc-protected.
+
+ // We could probably imagine a more specialized stackwalk that
+ // avoids these calls and is thus GC_NOTRIGGER.
+ CONTRACT_VIOLATION(GCViolation);
+
+ // This is being run live, so there's no filter available.
+ CONTEXT *context;
+ context = g_pEEInterface->GetThreadFilterContext(thread);
+ _ASSERTE(context == NULL);
+ _ASSERTE(!ISREDIRECTEDTHREAD(thread));
+
+ // This is actually safe because we're coming from a TraceCall, which
+ // means we're not in the middle of a stub. We don't have some partially
+ // constructed frame, so we can safely traverse the stack.
+ // However, we may still have a problem w/ the GC-violation.
+ StackTraceTicket ticket(StackTraceTicket::SPECIAL_CASE_TICKET);
+ info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);
+
+ FramePointer fp = info.m_activeFrame.fp;
+
+ return fp;
+}
+//
+// DispatchTraceCall is called when a call is traced in the EE
+// It dispatches the event to the appropriate controllers.
+//
+
+bool DebuggerController::DispatchTraceCall(Thread *thread,
+ const BYTE *ip)
+{
+ CONTRACTL
+ {
+ GC_NOTRIGGER;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ bool used = false;
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DC::DTC: TraceCall at 0x%x\n", ip));
+
+ ControllerLockHolder lockController;
+ {
+ DebuggerController *p;
+
+ p = g_controllers;
+ while (p != NULL)
+ {
+ DebuggerController *pNext = p->m_next;
+
+ if (p->m_thread == thread && p->m_traceCall)
+ {
+ bool trigger;
+
+ if (p->m_traceCallFP == LEAF_MOST_FRAME)
+ trigger = true;
+ else
+ {
+ // We know we don't have a filter context, so get a frame pointer from our frame chain.
+ FramePointer fpToCheck = GetCurrentFramePointerFromStackTraceForTraceCall(thread);
+
+
+ // <REVISIT_TODO>
+ //
+ // Currently, we never ever put a patch in an IL stub, and as such, if the IL stub
+ // throws an exception after returning from unmanaged code, we would not trigger
+ // a trace call when we call the constructor of the exception. The following is
+                    // kind of a workaround to make that work. If we ever make the change to stop in
+                    // IL stubs (for example, if we start to share security IL stubs), then this can be
+ // removed.
+ //
+ // </REVISIT_TODO>
+
+
+
+ // It's possible this stackwalk may be done at an unsafe time.
+ // this method may trigger a GC, for example, in
+ // FramedMethodFrame::AskStubForUnmanagedCallSite
+ // which will trash the incoming argument array
+ // which is not gc-protected.
+ ControllerStackInfo info;
+ {
+ CONTRACT_VIOLATION(GCViolation);
+#ifdef _DEBUG
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
+#endif // _DEBUG
+ _ASSERTE(context == NULL);
+ _ASSERTE(!ISREDIRECTEDTHREAD(thread));
+
+ // See explanation in GetCurrentFramePointerFromStackTraceForTraceCall.
+ StackTraceTicket ticket(StackTraceTicket::SPECIAL_CASE_TICKET);
+ info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);
+ }
+
+ if (info.m_activeFrame.chainReason == CHAIN_ENTER_UNMANAGED)
+ {
+ _ASSERTE(info.HasReturnFrame());
+
+ // This check makes sure that we don't do this logic for inlined frames.
+ if (info.m_returnFrame.md->IsILStub())
+ {
+ // Make sure that the frame pointer of the active frame is actually
+ // the address of an exit frame.
+ _ASSERTE( (static_cast<Frame*>(info.m_activeFrame.fp.GetSPValue()))->GetFrameType()
+ == Frame::TYPE_EXIT );
+ _ASSERTE(!info.m_returnFrame.HasChainMarker());
+ fpToCheck = info.m_returnFrame.fp;
+ }
+ }
+
+ // @todo - This comparison seems somewhat nonsensical. We don't have a filter context
+ // in place, so what frame pointer is fpToCheck actually for?
+ trigger = IsEqualOrCloserToRoot(fpToCheck, p->m_traceCallFP);
+ }
+
+ if (trigger)
+ {
+ used = true;
+
+ // This can only update controller's state, can't actually send IPC events.
+ p->TriggerTraceCall(thread, ip);
+ }
+ }
+
+ p = pNext;
+ }
+ }
+
+ return used;
+}
+
+bool DebuggerController::IsMethodEnterEnabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fEnableMethodEnter;
+}
+
+
+// Notify dispatching logic that this controller wants to get TriggerMethodEnter
+// We keep a count of total controllers waiting for MethodEnter (in g_cTotalMethodEnter).
+// That way we know if any controllers want MethodEnter callbacks. If none do,
+// then we can set the JMC probe flag to false for all modules.
+void DebuggerController::EnableMethodEnter()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ControllerLockHolder chController;
+ Debugger::DebuggerDataLockHolder chInfo(g_pDebugger);
+
+ // Both JMC + Traditional steppers may use MethodEnter.
+ // For JMC, it's a core part of functionality. For Traditional steppers, we use it as a backstop
+ // in case the stub-managers fail.
+ _ASSERTE(g_cTotalMethodEnter >= 0);
+ if (!m_fEnableMethodEnter)
+ {
+ LOG((LF_CORDB, LL_INFO1000000, "DC::EnableME, this=%p, previously disabled\n", this));
+ m_fEnableMethodEnter = true;
+
+ g_cTotalMethodEnter++;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000000, "DC::EnableME, this=%p, already set\n", this));
+ }
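+    // At least one controller now wants MethodEnter callbacks, so ensure the JMC probe
+    // flag is turned on for every module.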
+ g_pDebugger->UpdateAllModuleJMCFlag(g_cTotalMethodEnter != 0); // Needs JitInfo lock
+}
+
+// Notify dispatching logic that this controller doesn't want to get
+// TriggerMethodEnter
+void DebuggerController::DisableMethodEnter()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ ControllerLockHolder chController;
+ Debugger::DebuggerDataLockHolder chInfo(g_pDebugger);
+
+ if (m_fEnableMethodEnter)
+ {
+ LOG((LF_CORDB, LL_INFO1000000, "DC::DisableME, this=%p, previously set\n", this));
+ m_fEnableMethodEnter = false;
+
+ g_cTotalMethodEnter--;
+ _ASSERTE(g_cTotalMethodEnter >= 0);
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000000, "DC::DisableME, this=%p, already disabled\n", this));
+ }
+
+ g_pDebugger->UpdateAllModuleJMCFlag(g_cTotalMethodEnter != 0); // Needs JitInfo lock
+}
+
+// Loop through controllers and dispatch TriggerMethodEnter
+void DebuggerController::DispatchMethodEnter(void * pIP, FramePointer fp)
+{
+ _ASSERTE(pIP != NULL);
+
+ Thread * pThread = g_pEEInterface->GetThread();
+ _ASSERTE(pThread != NULL);
+
+ // Lookup the DJI for this method & ip.
+ // Since we create DJIs when we jit the code, and this code has been jitted
+ // (that's where the probe's coming from!), we will have a DJI.
+ DebuggerJitInfo * dji = g_pDebugger->GetJitInfoFromAddr((TADDR) pIP);
+
+ // This includes the case where we have a LightWeight codegen method.
+ if (dji == NULL)
+ {
+ return;
+ }
+
+ LOG((LF_CORDB, LL_INFO100000, "DC::DispatchMethodEnter for '%s::%s'\n",
+ dji->m_fd->m_pszDebugClassName,
+ dji->m_fd->m_pszDebugMethodName));
+
+ ControllerLockHolder lockController;
+
+ // For debug check, keep a count to make sure that g_cTotalMethodEnter
+ // is actually the number of controllers w/ MethodEnter enabled.
+ int count = 0;
+
+ DebuggerController *p = g_controllers;
+ while (p != NULL)
+ {
+ if (p->m_fEnableMethodEnter)
+ {
+ if ((p->GetThread() == NULL) || (p->GetThread() == pThread))
+ {
+ ++count;
+ p->TriggerMethodEnter(pThread, dji, (const BYTE *) pIP, fp);
+ }
+ }
+ p = p->m_next;
+ }
+
+ _ASSERTE(g_cTotalMethodEnter == count);
+
+}
+
+//
+// AddProtection adds page protection to (at least) the given range of
+// addresses
+//
+
+void DebuggerController::AddProtection(const BYTE *start, const BYTE *end,
+ bool readable)
+{
+ // !!!
+ _ASSERTE(!"Not implemented yet");
+}
+
+//
+// RemoveProtection removes page protection from the given
+// addresses. The parameters should match an earlier call to
+// AddProtection
+//
+
+void DebuggerController::RemoveProtection(const BYTE *start, const BYTE *end,
+ bool readable)
+{
+ // !!!
+ _ASSERTE(!"Not implemented yet");
+}
+
+
+// Default implementations for FuncEvalEnter & Exit notifications.
+void DebuggerController::TriggerFuncEvalEnter(Thread * thread)
+{
+    LOG((LF_CORDB, LL_INFO100000, "DC::TFEEnter, thread=%p, this=%p\n", thread, this));
+}
+
+void DebuggerController::TriggerFuncEvalExit(Thread * thread)
+{
+    LOG((LF_CORDB, LL_INFO100000, "DC::TFEExit, thread=%p, this=%p\n", thread, this));
+}
+
+// bool DebuggerController::TriggerPatch() What: Tells the
+// static DC whether this patch should be activated now.
+// Returns true if it should be, false otherwise.
+// How: Base class implementation returns false. Others may
+// return true.
+TP_RESULT DebuggerController::TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerPatch\n"));
+ return TPR_IGNORE;
+}
+
+bool DebuggerController::TriggerSingleStep(Thread *thread,
+ const BYTE *ip)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerSingleStep\n"));
+ return false;
+}
+
+void DebuggerController::TriggerUnwind(Thread *thread,
+ MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset,
+ FramePointer fp,
+ CorDebugStepReason unwindReason)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerUnwind\n"));
+}
+
+void DebuggerController::TriggerTraceCall(Thread *thread,
+ const BYTE *ip)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerTraceCall\n"));
+}
+
+TP_RESULT DebuggerController::TriggerExceptionHook(Thread *thread, CONTEXT * pContext,
+ EXCEPTION_RECORD *exception)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerExceptionHook\n"));
+ return TPR_IGNORE;
+}
+
+void DebuggerController::TriggerMethodEnter(Thread * thread,
+ DebuggerJitInfo * dji,
+ const BYTE * ip,
+ FramePointer fp)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DC::TME in default impl. dji=%p, addr=%p, fp=%p\n",
+ dji, ip, fp.GetSPValue()));
+}
+
+bool DebuggerController::SendEvent(Thread *thread, bool fIpChanged)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ SENDEVENT_CONTRACT_ITEMS;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default SendEvent\n"));
+
+    // If any derived class triggers an event, it should also implement SendEvent.
+ _ASSERTE(false || !"Base DebuggerController sending an event?");
+ return false;
+}
+
+
+// Dispatch Func-Eval Enter & Exit notifications.
+void DebuggerController::DispatchFuncEvalEnter(Thread * thread)
+{
+ LOG((LF_CORDB, LL_INFO100000, "DC::DispatchFuncEvalEnter for thread 0x%p\n", thread));
+
+ ControllerLockHolder lockController;
+
+ DebuggerController *p = g_controllers;
+ while (p != NULL)
+ {
+ if ((p->GetThread() == NULL) || (p->GetThread() == thread))
+ {
+ p->TriggerFuncEvalEnter(thread);
+ }
+
+ p = p->m_next;
+ }
+
+
+}
+
+void DebuggerController::DispatchFuncEvalExit(Thread * thread)
+{
+ LOG((LF_CORDB, LL_INFO100000, "DC::DispatchFuncEvalExit for thread 0x%p\n", thread));
+
+ ControllerLockHolder lockController;
+
+ DebuggerController *p = g_controllers;
+ while (p != NULL)
+ {
+ if ((p->GetThread() == NULL) || (p->GetThread() == thread))
+ {
+ p->TriggerFuncEvalExit(thread);
+ }
+
+ p = p->m_next;
+ }
+
+
+}
+
+
+#ifdef _DEBUG
+// See comment in DispatchNativeException
+void ThisFunctionMayHaveTriggerAGC()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ GC_TRIGGERS;
+ NOTHROW;
+ }
+ CONTRACTL_END;
+}
+#endif
+
+// bool DebuggerController::DispatchNativeException() Figures out
+// if any debugger controllers will handle the exception.
+// DispatchNativeException should be called by the EE when a native exception
+// occurs. If it returns true, the exception was generated by a Controller and
+// should be ignored.
+// How: Calls DispatchExceptionHook to see if anything is
+// interested in ExceptionHook, then does a switch on dwCode:
+// EXCEPTION_BREAKPOINT means invoke DispatchPatchOrSingleStep(ST_PATCH).
+// EXCEPTION_SINGLE_STEP means DispatchPatchOrSingleStep(ST_SINGLE_STEP).
+// EXCEPTION_ACCESS_VIOLATION means invoke DispatchAccessViolation.
+// Returns true if the exception was actually meant for the debugger,
+// returns false otherwise.
+bool DebuggerController::DispatchNativeException(EXCEPTION_RECORD *pException,
+ CONTEXT *pContext,
+ DWORD dwCode,
+ Thread *pCurThread)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+
+ // If this exception is for the debugger, then we may trigger a GC.
+ // But we'll be called on _any_ exception, including ones in a GC-no-triggers region.
+ // Our current contract system doesn't let us specify such conditions on GC_TRIGGERS.
+ // So we disable it now, and if we find out the exception is meant for the debugger,
+ // we'll call ThisFunctionMayHaveTriggerAGC() to ping that we're really a GC_TRIGGERS.
+ DISABLED(GC_TRIGGERS); // Only GC triggers if we send an event,
+ PRECONDITION(!IsDbgHelperSpecialThread());
+
+        // If we're called from preemptive mode, then our caller has protected the stack.
+ // If we're in cooperative mode, then we need to protect the stack before toggling GC modes
+ // (by setting the filter-context)
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pException));
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pCurThread));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DispatchNativeException was called\n"));
+ LOG((LF_CORDB, LL_INFO10000, "Native exception at 0x%x, code=0x%8x, context=0x%p, er=0x%p\n",
+ pException->ExceptionAddress, dwCode, pContext, pException));
+
+
+ bool fDebuggers;
+ BOOL fDispatch;
+ DPOSS_ACTION result = DPOSS_DONT_CARE;
+
+
+ // We have a potentially ugly locking problem here. This notification is called on any exception,
+ // but we have no idea what our locking context is at the time. Thus we may hold locks smaller
+ // than the controller lock.
+ // The debugger logic really only cares about exceptions directly in managed code (eg, hardware exceptions)
+ // or in patch-skippers (since that's a copy of managed code running in a look-aside buffer).
+ // That should exclude all C++ exceptions, which are the common case if Runtime code throws an internal ex.
+ // So we ignore those to avoid the lock violation.
+ if (pException->ExceptionCode == EXCEPTION_MSVC)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "Debugger skipping for C++ exception.\n"));
+ return FALSE;
+ }
+
+ // The debugger really only cares about exceptions in managed code. Any exception that occurs
+ // while the thread is redirected (such as EXCEPTION_HIJACK) is not of interest to the debugger.
+ // Allowing this would be problematic because when an exception occurs while the thread is
+ // redirected, we don't know which context (saved redirection context or filter context)
+ // we should be operating on (see code:GetManagedStoppedCtx).
+ if( ISREDIRECTEDTHREAD(pCurThread) )
+ {
+ LOG((LF_CORDB, LL_INFO1000, "Debugger ignoring exception 0x%x on redirected thread.\n", dwCode));
+
+ // We shouldn't be seeing debugging exceptions on a redirected thread. While a thread is
+ // redirected we only call a few internal things (see code:Thread.RedirectedHandledJITCase),
+ // and may call into the host. We can't call normal managed code or anything we'd want to debug.
+ _ASSERTE(dwCode != EXCEPTION_BREAKPOINT);
+ _ASSERTE(dwCode != EXCEPTION_SINGLE_STEP);
+
+ return FALSE;
+ }
+
+ // It's possible we're here without a debugger (since we have to call the
+ // patch skippers). The Debugger may detach anytime,
+ // so remember the attach state now.
+#ifdef _DEBUG
+ bool fWasAttached = false;
+#ifdef DEBUGGING_SUPPORTED
+ fWasAttached = (CORDebuggerAttached() != 0);
+#endif //DEBUGGING_SUPPORTED
+#endif //_DEBUG
+
+ {
+ // If we're in cooperative mode, it's unsafe to do a GC until we've put a filter context in place.
+ GCX_NOTRIGGER();
+
+ // If we know the debugger doesn't care about this exception, bail now.
+ // Usually this is just if there's a debugger attached.
+ // However, if a debugger detached but left outstanding controllers (like patch-skippers),
+ // we still may care.
+ // The only way a controller would get created outside of the helper thread is from
+ // a patch skipper, so we always handle breakpoints.
+ if (!CORDebuggerAttached() && (g_controllers == NULL) && (dwCode != EXCEPTION_BREAKPOINT))
+ {
+ return false;
+ }
+
+ FireEtwDebugExceptionProcessingStart();
+
+ // We should never be here if the debugger was never involved.
+ CONTEXT * pOldContext;
+ pOldContext = pCurThread->GetFilterContext();
+
+ // In most cases it is an error to nest, however in the patch-skipping logic we must
+ // copy an unknown amount of code into another buffer and it occasionally triggers
+ // an AV. This heuristic should filter that case out. See DDB 198093.
+ // Ensure we perform this exception nesting filtering even before the call to
+ // DebuggerController::DispatchExceptionHook, otherwise the nesting will continue when
+ // a contract check is triggered in DispatchExceptionHook and another BP exception is
+ // raised. See Dev11 66058.
+ if ((pOldContext != NULL) && pCurThread->AVInRuntimeImplOkay() &&
+ pException->ExceptionCode == STATUS_ACCESS_VIOLATION)
+ {
+ STRESS_LOG1(LF_CORDB, LL_INFO100, "DC::DNE Nested Access Violation at 0x%p is being ignored\n",
+ pException->ExceptionAddress);
+ return false;
+ }
+ // Otherwise it is an error to nest at all
+ _ASSERTE(pOldContext == NULL);
+
+ fDispatch = DebuggerController::DispatchExceptionHook(pCurThread,
+ pContext,
+ pException);
+
+ {
+ // Must be in cooperative mode to set the filter context. We know there are times we'll be in preemptive mode,
+ // (such as M2U handoff, or potentially patches in the middle of a stub, or various random exceptions)
+
+ // @todo - We need to worry about GC-protecting our stack. If we're in preemptive mode, the caller did it for us.
+ // If we're in cooperative, then we need to set the FilterContext *before* we toggle GC mode (since
+ // the FC protects the stack).
+ // If we're in preemptive, then we need to set the FilterContext *after* we toggle ourselves to Cooperative.
+ // Also note it may not be possible to toggle GC mode at these times (such as in the middle of the stub).
+ //
+ // Part of the problem is that the Filter Context is serving 2 purposes here:
+ // - GC protect the stack. (essential if we're in coop mode).
+ // - provide info to controllers (such as current IP, and a place to set the Single-Step flag).
+ //
+ // This contract violation is mitigated in that we must have had the debugger involved to get to this point.
+ CONTRACT_VIOLATION(ModeViolation);
+ g_pEEInterface->SetThreadFilterContext(pCurThread, pContext);
+ }
+ // Now that we've set the filter context, we can let the GCX_NOTRIGGER expire.
+ // It's still possible that we may be called from a No-trigger region.
+ }
+
+
+ if (fDispatch)
+ {
+ // Disable SingleStep for all controllers on this thread. This requires the filter context set.
+ // This is what would disable the ss-flag when single-stepping over an AV.
+ if (g_patchTableValid && (dwCode != EXCEPTION_SINGLE_STEP))
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DC::DNE non-single-step exception; check if any controller has ss turned on\n"));
+
+ ControllerLockHolder lockController;
+ for (DebuggerController* p = g_controllers; p != NULL; p = p->m_next)
+ {
+ if (p->m_singleStep && (p->m_thread == pCurThread))
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DC::DNE turn off ss for controller 0x%p\n", p));
+ p->DisableSingleStep();
+ }
+ }
+ // implicit controller lock release
+ }
+
+ CORDB_ADDRESS_TYPE * ip = dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(pContext));
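+        // The faulting IP from the context is what the dispatch routines below use to
+        // look up any controller patch at that address.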
+
+ switch (dwCode)
+ {
+ case EXCEPTION_BREAKPOINT:
+ // EIP should be properly set up at this point.
+ result = DebuggerController::DispatchPatchOrSingleStep(pCurThread,
+ pContext,
+ ip,
+ ST_PATCH);
+ LOG((LF_CORDB, LL_EVERYTHING, "DC::DNE DispatchPatch call returned\n"));
+
+ // If we detached, we should remove all our breakpoints. So if we try
+ // to handle this breakpoint, make sure that we're attached.
+ if (IsInUsedAction(result) == true)
+ {
+ _ASSERTE(fWasAttached);
+ }
+ break;
+
+ case EXCEPTION_SINGLE_STEP:
+ LOG((LF_CORDB, LL_EVERYTHING, "DC::DNE SINGLE_STEP Exception\n"));
+
+ result = DebuggerController::DispatchPatchOrSingleStep(pCurThread,
+ pContext,
+ ip,
+ (SCAN_TRIGGER)(ST_PATCH|ST_SINGLE_STEP));
+ // We pass patch | single step since single steps actually
+ // do both (eg, you SS onto a breakpoint).
+ break;
+
+ default:
+ break;
+ } // end switch
+
+ }
+#ifdef _DEBUG
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DC:: DNE step-around fDispatch:0x%x!\n", fDispatch));
+ }
+#endif //_DEBUG
+
+ fDebuggers = (fDispatch?(IsInUsedAction(result)?true:false):true);
+
+ LOG((LF_CORDB, LL_INFO10000, "DC::DNE, returning 0x%x.\n", fDebuggers));
+
+#ifdef _DEBUG
+ if (fDebuggers && (result == DPOSS_USED_WITH_EVENT))
+ {
+ // If the exception belongs to the debugger, then we may have sent an event,
+ // and thus we may have triggered a GC.
+ ThisFunctionMayHaveTriggerAGC();
+ }
+#endif
+
+
+
+ // Must restore the filter context. After the filter context is gone, we're
+ // unprotected again and unsafe for a GC.
+ {
+ CONTRACT_VIOLATION(ModeViolation);
+ g_pEEInterface->SetThreadFilterContext(pCurThread, NULL);
+ }
+
+#ifdef _TARGET_ARM_
+ if (pCurThread->IsSingleStepEnabled())
+ pCurThread->ApplySingleStep(pContext);
+#endif
+
+ FireEtwDebugExceptionProcessingEnd();
+
+ return fDebuggers;
+}
+
+// * -------------------------------------------------------------------------
+// * DebuggerPatchSkip routines
+// * -------------------------------------------------------------------------
+
+DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread,
+ DebuggerControllerPatch *patch,
+ AppDomain *pAppDomain)
+ : DebuggerController(thread, pAppDomain),
+ m_address(patch->address)
+{
+ LOG((LF_CORDB, LL_INFO10000,
+ "DPS::DPS: Patch skip 0x%x\n", patch->address));
+
+ // On ARM the single-step emulation already utilizes a per-thread execution buffer similar to the scheme
+ // below. As a result we can skip most of the instruction parsing logic that's instead internalized into
+ // the single-step emulation itself.
+#ifndef _TARGET_ARM_
+
+ // NOTE: in order to correctly single-step RIP-relative writes on multiple threads we need to set up
+ // a shared buffer with the instruction and a buffer for the RIP-relative value so that all threads
+    // are working on the same copy. As the single-steps complete, the modified data in the buffer is
+ // copied back to the real address to ensure proper execution of the program.
+
+ //
+    // Create the shared instruction block. This will also create the shared RIP-relative buffer
+ //
+
+ m_pSharedPatchBypassBuffer = patch->GetOrCreateSharedPatchBypassBuffer();
+ BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
+
+ // Copy the instruction block over to the patch skip
+ // WARNING: there used to be an issue here because CopyInstructionBlock copied the breakpoint from the
+ // jitted code stream into the patch buffer. Further below CORDbgSetInstruction would correct the
+ // first instruction. This buffer is shared by all threads so if another thread executed the buffer
+ // between this thread's execution of CopyInstructionBlock and CORDbgSetInstruction the wrong
+ // code would be executed. The bug has been fixed by changing CopyInstructionBlock to only copy
+ // the code bytes after the breakpoint.
+    // You might be tempted to skip copying the code entirely; however, that wouldn't work well with rejit.
+ // If we skip a breakpoint that is sitting at the beginning of a method, then the profiler rejits that
+ // method causing a jump-stamp to be placed, then we skip the breakpoint again, we need to make sure
+ // the 2nd skip executes the new jump-stamp code and not the original method prologue code. Copying
+ // the code every time ensures that we have the most up-to-date version of the code in the buffer.
+ _ASSERTE( patch->IsBound() );
+ CopyInstructionBlock(patchBypass, (const BYTE *)patch->address);
+
+ // Technically, we could create a patch skipper for an inactive patch, but we rely on the opcode being
+ // set here.
+ _ASSERTE( patch->IsActivated() );
+ CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)patchBypass, patch->opcode);
+
+ LOG((LF_CORDB, LL_EVERYTHING, "SetInstruction was called\n"));
+
+ //
+ // Look at instruction to get some attributes
+ //
+
+ NativeWalker::DecodeInstructionForPatchSkip(patchBypass, &(m_instrAttrib));
+
+#if defined(_TARGET_AMD64_)
+    // The code below handles RIP-relative addressing on AMD64. The original implementation made the assumption that
+    // we are only using RIP-relative addressing to access read-only data (see VSW 246145 for more information). This
+    // has since been expanded to handle RIP-relative writes as well.
+ if (m_instrAttrib.m_dwOffsetToDisp != 0)
+ {
+ _ASSERTE(m_instrAttrib.m_cbInstr != 0);
+
+ //
+ // Populate the RIP-relative buffer with the current value if needed
+ //
+
+ BYTE* bufferBypass = m_pSharedPatchBypassBuffer->BypassBuffer;
+
+ // Overwrite the *signed* displacement.
+ int dwOldDisp = *(int*)(&patchBypass[m_instrAttrib.m_dwOffsetToDisp]);
+ int dwNewDisp = offsetof(SharedPatchBypassBuffer, BypassBuffer) -
+ (offsetof(SharedPatchBypassBuffer, PatchBypass) + m_instrAttrib.m_cbInstr);
+ *(int*)(&patchBypass[m_instrAttrib.m_dwOffsetToDisp]) = dwNewDisp;
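+        // Since a RIP-relative displacement is measured from the end of the instruction,
+        // the new displacement redirects the copied instruction to the shared BypassBuffer,
+        // which lives at a fixed offset from PatchBypass within SharedPatchBypassBuffer.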
+
+ // This could be an LEA, which we'll just have to change into a MOV
+ // and copy the original address
+ if (((patchBypass[0] == 0x4C) || (patchBypass[0] == 0x48)) && (patchBypass[1] == 0x8d))
+ {
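+            // 0x48/0x4C are REX.W prefixes and 0x8D is the LEA opcode; rewriting the opcode
+            // byte to 0x8B (MOV r64, r/m64) makes the instruction load the precomputed
+            // address stored in the bypass buffer instead of recomputing it RIP-relative.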
+ patchBypass[1] = 0x8b; // MOV reg, mem
+ _ASSERTE((int)sizeof(void*) <= SharedPatchBypassBuffer::cbBufferBypass);
+ *(void**)bufferBypass = (void*)(patch->address + m_instrAttrib.m_cbInstr + dwOldDisp);
+ }
+ else
+ {
+ // Copy the data into our buffer.
+ memcpy(bufferBypass, patch->address + m_instrAttrib.m_cbInstr + dwOldDisp, SharedPatchBypassBuffer::cbBufferBypass);
+
+ if (m_instrAttrib.m_fIsWrite)
+ {
+ // save the actual destination address and size so when we TriggerSingleStep() we can update the value
+ m_pSharedPatchBypassBuffer->RipTargetFixup = (UINT_PTR)(patch->address + m_instrAttrib.m_cbInstr + dwOldDisp);
+ m_pSharedPatchBypassBuffer->RipTargetFixupSize = m_instrAttrib.m_cOperandSize;
+ }
+ }
+ }
+#endif // _TARGET_AMD64_
+
+#endif // !_TARGET_ARM_
+
+ // Signals our thread that the debugger will be manipulating the context
+ // during the patch skip operation. This effectively prevents other threads
+ // from suspending us until we have completed skiping the patch and restored
+    // from suspending us until we have completed skipping the patch and restored
+ thread->BeginDebuggerPatchSkip(this);
+
+ //
+ // Set IP of context to point to patch bypass buffer
+ //
+
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
+ _ASSERTE(!ISREDIRECTEDTHREAD(thread));
+ CONTEXT c;
+ if (context == NULL)
+ {
+ // We can't play with our own context!
+#if _DEBUG
+ if (g_pEEInterface->GetThread())
+ {
+            // current thread is a managed thread
+ _ASSERTE(Debugger::GetThreadIdHelper(thread) != Debugger::GetThreadIdHelper(g_pEEInterface->GetThread()));
+ }
+#endif // _DEBUG
+
+ c.ContextFlags = CONTEXT_CONTROL;
+
+ thread->GetThreadContext(&c);
+ context = &c;
+
+ ARM_ONLY(_ASSERTE(!"We should always have a filter context in DebuggerPatchSkip."));
+ }
+
+#ifdef _TARGET_ARM_
+ // Since we emulate all single-stepping on ARM using an instruction buffer and a breakpoint all we have to
+ // do here is initiate a normal single-step except that we pass the instruction to be stepped explicitly
+ // (calling EnableSingleStep() would infer this by looking at the PC in the context, which would pick up
+ // the patch we're trying to skip).
+ //
+ // Ideally we'd refactor the EnableSingleStep to support this alternative calling sequence but since this
+ // involves three levels of methods and is only applicable to ARM we've chosen to replicate the relevant
+ // implementation here instead.
+ {
+ ControllerLockHolder lockController;
+ g_pEEInterface->MarkThreadForDebugStepping(thread, true);
+ WORD opcode2 = 0;
+
+ if (Is32BitInstruction(patch->opcode))
+ {
+ opcode2 = CORDbgGetInstruction((CORDB_ADDRESS_TYPE *)(((DWORD)patch->address) + 2));
+ }
+
+ thread->BypassWithSingleStep((DWORD)patch->address, patch->opcode, opcode2);
+ m_singleStep = true;
+ }
+#else // _TARGET_ARM_
+
+ //set eip to point to buffer...
+ SetIP(context, (PCODE)patchBypass);
+
+ if (context == &c)
+ thread->SetThreadContext(&c);
+
+
+ LOG((LF_CORDB, LL_INFO10000, "Bypass at 0x%x\n", patchBypass));
+
+ //
+ // Turn on single step (if the platform supports it) so we can
+ // fix up state after the instruction is executed.
+ // Also turn on exception hook so we can adjust IP in exceptions
+ //
+
+ EnableSingleStep();
+
+#endif // _TARGET_ARM_
+
+ EnableExceptionHook();
+}
+
+DebuggerPatchSkip::~DebuggerPatchSkip()
+{
+#ifndef _TARGET_ARM_
+ _ASSERTE(m_pSharedPatchBypassBuffer);
+ m_pSharedPatchBypassBuffer->Release();
+#endif
+}
+
+//
+// We have to have a whole separate function for this because you
+// can't use __try in a function that requires object unwinding...
+//
+
+LONG FilterAccessViolation2(LPEXCEPTION_POINTERS ep, PVOID pv)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
+ ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
+}
+
+// This helper is required because the AVInRuntimeImplOkayHolder can not
+// be directly placed inside the scope of a PAL_TRY
+void _CopyInstructionBlockHelper(BYTE* to, const BYTE* from)
+{
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ // This function only copies the portion of the instruction that follows the
+ // breakpoint opcode, not the breakpoint itself
+ to += CORDbg_BREAK_INSTRUCTION_SIZE;
+ from += CORDbg_BREAK_INSTRUCTION_SIZE;
+
+ // If an AV occurs because we walked off a valid page then we need
+ // to be certain that all bytes on the previous page were copied.
+ // We are certain that we copied enough bytes to contain the instruction
+ // because it must have fit within the valid page.
+ for (int i = 0; i < MAX_INSTRUCTION_LENGTH - CORDbg_BREAK_INSTRUCTION_SIZE; i++)
+ {
+ *to++ = *from++;
+ }
+
+}
+
+// WARNING: this function skips copying the first CORDbg_BREAK_INSTRUCTION_SIZE bytes by design
+// See the comment at the callsite in DebuggerPatchSkip::DebuggerPatchSkip for more details on
+// this
+void DebuggerPatchSkip::CopyInstructionBlock(BYTE *to, const BYTE* from)
+{
+ // We wrap the memcpy in an exception handler to handle the
+ // extremely rare case where we're copying an instruction off the
+ // end of a method that is also at the end of a page, and the next
+ // page is unmapped.
+ struct Param
+ {
+ BYTE *to;
+ const BYTE* from;
+ } param;
+ param.to = to;
+ param.from = from;
+ PAL_TRY(Param *, pParam, &param)
+ {
+ _CopyInstructionBlockHelper(pParam->to, pParam->from);
+ }
+ PAL_EXCEPT_FILTER(FilterAccessViolation2)
+ {
+        // The whole point is that if we copy up to the AV, then
+ // that's enough to execute, otherwise we would not have been
+ // able to execute the code anyway. So we just ignore the
+ // exception.
+ LOG((LF_CORDB, LL_INFO10000,
+ "DPS::DPS: AV copying instruction block ignored.\n"));
+ }
+ PAL_ENDTRY
+
+ // We just created a new buffer of code, but the CPU caches code and may
+ // not be aware of our changes. This should force the CPU to dump any cached
+ // instructions it has in this region and load the new ones from memory
+ FlushInstructionCache(GetCurrentProcess(), to + CORDbg_BREAK_INSTRUCTION_SIZE,
+ MAX_INSTRUCTION_LENGTH - CORDbg_BREAK_INSTRUCTION_SIZE);
+}
+
+TP_RESULT DebuggerPatchSkip::TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+ ARM_ONLY(_ASSERTE(!"Should not have called DebuggerPatchSkip::TriggerPatch."));
+ LOG((LF_CORDB, LL_EVERYTHING, "DPS::TP called\n"));
+
+#if defined(_DEBUG) && !defined(_TARGET_ARM_)
+ CONTEXT *context = GetManagedLiveCtx(thread);
+
+ LOG((LF_CORDB, LL_INFO1000, "DPS::TP: We've patched 0x%x (byPass:0x%x) "
+ "for a skip after an EnC update!\n", GetIP(context),
+ GetBypassAddress()));
+ _ASSERTE(g_patches != NULL);
+
+ // We shouldn't have mucked with EIP, yet.
+ _ASSERTE(dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(context)) == GetBypassAddress());
+
+ //We should be the _only_ patch here
+ MethodDesc *md2 = dac_cast<PTR_MethodDesc>(GetIP(context));
+ DebuggerControllerPatch *patchCheck = g_patches->GetPatch(g_pEEInterface->MethodDescGetModule(md2),md2->GetMemberDef());
+ _ASSERTE(patchCheck == patch);
+ _ASSERTE(patchCheck->controller == patch->controller);
+
+ patchCheck = g_patches->GetNextPatch(patchCheck);
+ _ASSERTE(patchCheck == NULL);
+#endif // _DEBUG
+
+ DisableAll();
+ EnableExceptionHook();
+ EnableSingleStep(); //gets us back to where we want.
+ return TPR_IGNORE; // don't actually want to stop here....
+}
+
+TP_RESULT DebuggerPatchSkip::TriggerExceptionHook(Thread *thread, CONTEXT * context,
+ EXCEPTION_RECORD *exception)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ // Patch skippers only operate on patches set in managed code. But the infrastructure may have
+ // toggled the GC mode underneath us.
+ MODE_ANY;
+
+ PRECONDITION(GetThread() == thread);
+ PRECONDITION(thread != NULL);
+ PRECONDITION(CheckPointer(context));
+ }
+ CONTRACTL_END;
+
+ if (m_pAppDomain != NULL)
+ {
+ AppDomain *pAppDomainCur = thread->GetDomain();
+
+ if (pAppDomainCur != m_pAppDomain)
+ {
+            LOG((LF_CORDB,LL_INFO10000, "DPS::TEH: Appdomain mismatch - not skipping!\n"));
+ return TPR_IGNORE;
+ }
+ }
+
+ LOG((LF_CORDB,LL_INFO10000, "DPS::TEH: doing the patch-skip thing\n"));
+
+#ifndef _TARGET_ARM_
+ _ASSERTE(m_pSharedPatchBypassBuffer);
+ BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
+
+ if (m_instrAttrib.m_fIsCall && IsSingleStep(exception->ExceptionCode))
+ {
+ // Fixup return address on stack
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ SIZE_T *sp = (SIZE_T *) GetSP(context);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "Bypass call return address redirected from 0x%p\n", *sp));
+
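+        // The call executed out of the bypass buffer, so the return address it pushed
+        // points into the buffer; shift it back into the original code stream.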
+ *sp -= patchBypass - (BYTE*)m_address;
+
+ LOG((LF_CORDB, LL_INFO10000, "to 0x%p\n", *sp));
+#else
+ PORTABILITY_ASSERT("DebuggerPatchSkip::TriggerExceptionHook -- return address fixup NYI");
+#endif
+ }
+
+ if (!m_instrAttrib.m_fIsAbsBranch || !IsSingleStep(exception->ExceptionCode))
+ {
+ // Fixup IP
+
+ LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected from 0x%p\n", GetIP(context)));
+
+ if (IsSingleStep(exception->ExceptionCode))
+ {
+#ifndef FEATURE_PAL
+ // Check if the current IP is anywhere near the exception dispatcher logic.
+ // If it is, ignore the exception, as the real exception is coming next.
+ static FARPROC pExcepDispProc = NULL;
+
+ if (!pExcepDispProc)
+ {
+ HMODULE hNtDll = WszGetModuleHandle(W("ntdll.dll"));
+
+ if (hNtDll != NULL)
+ {
+ pExcepDispProc = GetProcAddress(hNtDll, "KiUserExceptionDispatcher");
+
+ if (!pExcepDispProc)
+ pExcepDispProc = (FARPROC)(size_t)(-1);
+ }
+ else
+ pExcepDispProc = (FARPROC)(size_t)(-1);
+ }
+
+ _ASSERTE(pExcepDispProc != NULL);
+
+ if ((size_t)pExcepDispProc != (size_t)(-1))
+ {
+ LPVOID pExcepDispEntryPoint = pExcepDispProc;
+
+ if ((size_t)GetIP(context) > (size_t)pExcepDispEntryPoint &&
+ (size_t)GetIP(context) <= ((size_t)pExcepDispEntryPoint + MAX_INSTRUCTION_LENGTH * 2 + 1))
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "Bypass instruction not redirected. Landed in exception dispatcher.\n"));
+
+ return (TPR_IGNORE_AND_STOP);
+ }
+ }
+#endif // FEATURE_PAL
+
+ // If the IP is close to the skip patch start, or if we were skipping over a call, then assume the IP needs
+ // adjusting.
+ if (m_instrAttrib.m_fIsCall ||
+ ((size_t)GetIP(context) > (size_t)patchBypass &&
+ (size_t)GetIP(context) <= (size_t)(patchBypass + MAX_INSTRUCTION_LENGTH + 1)))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because still in skip area.\n"));
+ LOG((LF_CORDB, LL_INFO10000, "m_fIsCall = %d, patchBypass = 0x%x, m_address = 0x%x\n",
+ m_instrAttrib.m_fIsCall, patchBypass, m_address));
+ SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address)));
+ }
+ else
+ {
+ // Otherwise, need to see if the IP is something we recognize (either managed code
+ // or stub code) - if not, we ignore the exception
+ PCODE newIP = GetIP(context);
+ newIP -= PCODE(patchBypass - (BYTE *)m_address);
+ TraceDestination trace;
+
+ if (g_pEEInterface->IsManagedNativeCode(dac_cast<PTR_CBYTE>(newIP)) ||
+ (g_pEEInterface->TraceStub(LPBYTE(newIP), &trace)))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because we landed in managed or stub code\n"));
+ SetIP(context, newIP);
+ }
+
+ // If we have no idea where things have gone, then we assume that the IP needs no adjusting (which
+ // could happen if the instruction we were trying to patch skip caused an AV). In this case we want
+ // to claim it as ours but ignore it and continue execution.
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Bypass instruction not redirected because we're not in managed or stub code.\n"));
+ return (TPR_IGNORE_AND_STOP);
+ }
+ }
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because it wasn't a single step exception.\n"));
+ SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address)));
+ }
+
+ LOG((LF_CORDB, LL_ALWAYS, "to 0x%x\n", GetIP(context)));
+
+ }
+
+#endif // _TARGET_ARM_
+
+ // Signals our thread that the debugger is done manipulating the context
+ // during the patch skip operation. This effectively prevented other threads
+    // from suspending us until we completed skipping the patch and restored
+ // a good context (See DDB 188816)
+ m_thread->EndDebuggerPatchSkip();
+
+ // Don't delete the controller yet if this is a single step exception, as the code will still want to dispatch to
+ // our single step method, and if it doesn't find something to dispatch to we won't continue from the exception.
+ //
+ // (This is kind of broken behavior but is easily worked around here
+ // by this test)
+ if (!IsSingleStep(exception->ExceptionCode))
+ {
+ Delete();
+ }
+
+ DisableExceptionHook();
+
+ return TPR_TRIGGER;
+}
+
+bool DebuggerPatchSkip::TriggerSingleStep(Thread *thread, const BYTE *ip)
+{
+ LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: basically a no-op\n"));
+
+ if (m_pAppDomain != NULL)
+ {
+ AppDomain *pAppDomainCur = thread->GetDomain();
+
+ if (pAppDomainCur != m_pAppDomain)
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: Appdomain mismatch - "
+                "not single-stepping!\n"));
+ return false;
+ }
+ }
+#if defined(_TARGET_AMD64_)
+ // Dev11 91932: for RIP-relative writes we need to copy the value that was written in our buffer to the actual address
+ _ASSERTE(m_pSharedPatchBypassBuffer);
+ if (m_pSharedPatchBypassBuffer->RipTargetFixup)
+ {
+ _ASSERTE(m_pSharedPatchBypassBuffer->RipTargetFixupSize);
+
+ BYTE* bufferBypass = m_pSharedPatchBypassBuffer->BypassBuffer;
+ BYTE fixupSize = m_pSharedPatchBypassBuffer->RipTargetFixupSize;
+ UINT_PTR targetFixup = m_pSharedPatchBypassBuffer->RipTargetFixup;
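+        // Copy the value the bypassed instruction wrote into the shared buffer back to
+        // its real RIP-relative destination, using the operand size recorded at skip setup.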
+
+ switch (fixupSize)
+ {
+ case 1:
+ *(reinterpret_cast<BYTE*>(targetFixup)) = *(reinterpret_cast<BYTE*>(bufferBypass));
+ break;
+
+ case 2:
+ *(reinterpret_cast<WORD*>(targetFixup)) = *(reinterpret_cast<WORD*>(bufferBypass));
+ break;
+
+ case 4:
+ *(reinterpret_cast<DWORD*>(targetFixup)) = *(reinterpret_cast<DWORD*>(bufferBypass));
+ break;
+
+ case 8:
+ *(reinterpret_cast<ULONGLONG*>(targetFixup)) = *(reinterpret_cast<ULONGLONG*>(bufferBypass));
+ break;
+
+ case 16:
+ memcpy(reinterpret_cast<void*>(targetFixup), bufferBypass, 16);
+ break;
+
+ default:
+ _ASSERTE(!"bad operand size");
+ }
+ }
+#endif
+ LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: triggered, about to delete\n"));
+
+ TRACE_FREE(this);
+ Delete();
+ return false;
+}
+
+// * -------------------------------------------------------------------------
+// * DebuggerBreakpoint routines
+// * -------------------------------------------------------------------------
+// DebuggerBreakpoint::DebuggerBreakpoint() The constructor
+// invokes AddBindAndActivatePatch to set the breakpoint
+DebuggerBreakpoint::DebuggerBreakpoint(Module *module,
+ mdMethodDef md,
+ AppDomain *pAppDomain,
+ SIZE_T offset,
+ bool native,
+ SIZE_T ilEnCVersion, // must give the EnC version for non-native bps
+ MethodDesc *nativeMethodDesc, // use only when m_native
+ DebuggerJitInfo *nativeJITInfo, // optional when m_native, null otherwise
+ BOOL *pSucceed
+ )
+ : DebuggerController(NULL, pAppDomain)
+{
+ _ASSERTE(pSucceed != NULL);
+ _ASSERTE(native == (nativeMethodDesc != NULL));
+ _ASSERTE(native || nativeJITInfo == NULL);
+ _ASSERTE(!nativeJITInfo || nativeJITInfo->m_jitComplete); // this is sent by the left-side, and it couldn't have got the code if the JIT wasn't complete
+
+ if (native)
+ {
+ (*pSucceed) = AddBindAndActivateNativeManagedPatch(nativeMethodDesc, nativeJITInfo, offset, LEAF_MOST_FRAME, pAppDomain);
+ return;
+ }
+ else
+ {
+ (*pSucceed) = AddILPatch(pAppDomain, module, md, ilEnCVersion, offset);
+ }
+}
+
+// TP_RESULT DebuggerBreakpoint::TriggerPatch()
+// What: This patch will always be activated.
+// How: return true.
+TP_RESULT DebuggerBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DB::TP\n"));
+
+ return TPR_TRIGGER;
+}
+
+// void DebuggerBreakpoint::SendEvent() What: Inform
+// the right side that the breakpoint was reached.
+// How: g_pDebugger->SendBreakpoint()
+bool DebuggerBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ SENDEVENT_CONTRACT_ITEMS;
+ }
+ CONTRACTL_END;
+
+
+ LOG((LF_CORDB, LL_INFO10000, "DB::SE: in DebuggerBreakpoint's SendEvent\n"));
+
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
+
+    // If we got interrupted by SetIp, we just don't send the IPC event. Our triggers are still
+ // active so no harm done.
+ if (!fIpChanged)
+ {
+ g_pDebugger->SendBreakpoint(thread, context, this);
+ return true;
+ }
+
+ // Controller is still alive, will fire if we hit the breakpoint again.
+ return false;
+}
+
+//* -------------------------------------------------------------------------
+// * DebuggerStepper routines
+// * -------------------------------------------------------------------------
+
+DebuggerStepper::DebuggerStepper(Thread *thread,
+ CorDebugUnmappedStop rgfMappingStop,
+ CorDebugIntercept interceptStop,
+ AppDomain *appDomain)
+ : DebuggerController(thread, appDomain),
+ m_stepIn(false),
+ m_reason(STEP_NORMAL),
+ m_fpStepInto(LEAF_MOST_FRAME),
+ m_rgfInterceptStop(interceptStop),
+ m_rgfMappingStop(rgfMappingStop),
+ m_range(NULL),
+ m_rangeCount(0),
+ m_realRangeCount(0),
+ m_fp(LEAF_MOST_FRAME),
+#if defined(WIN64EXCEPTIONS)
+ m_fpParentMethod(LEAF_MOST_FRAME),
+#endif // WIN64EXCEPTIONS
+ m_fpException(LEAF_MOST_FRAME),
+ m_fdException(0),
+ m_cFuncEvalNesting(0)
+{
+#ifdef _DEBUG
+ m_fReadyToSend = false;
+#endif
+}
+
+DebuggerStepper::~DebuggerStepper()
+{
+ if (m_range != NULL)
+ {
+ TRACE_FREE(m_range);
+ DeleteInteropSafe(m_range);
+ }
+}
+
+// bool DebuggerStepper::ShouldContinueStep() Return true if
+// the stepper should not stop at this address. The stepper should not
+// stop here if this address is in the prolog, epilog, etc.,
+// and the stepper is not interested in stopping there.
+// We assume that this is being called in the frame which the stepper steps
+// through. Unless, of course, we're returning from a call, in which
+// case we want to stop in the epilog even if the user didn't say so,
+// to prevent stepping out of multiple frames at once.
+// <REVISIT_TODO>Possible optimization: GetJitInfo, then AddPatch @ end of prolog?</REVISIT_TODO>
+bool DebuggerStepper::ShouldContinueStep( ControllerStackInfo *info,
+ SIZE_T nativeOffset)
+{
+ LOG((LF_CORDB,LL_INFO10000, "DeSt::ShContSt: nativeOffset:0x%p \n", nativeOffset));
+ if (m_rgfMappingStop != STOP_ALL && (m_reason != STEP_EXIT) )
+ {
+
+ DebuggerJitInfo *ji = info->m_activeFrame.GetJitInfoFromFrame();
+
+ if ( ji != NULL )
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DeSt::ShContSt: For code 0x%p, got "
+ "DJI 0x%p, from 0x%p to 0x%p\n",
+ (const BYTE*)GetControlPC(&(info->m_activeFrame.registers)),
+ ji, ji->m_addrOfCode, ji->m_addrOfCode+ji->m_sizeOfCode));
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DeSt::ShCoSt: For code 0x%p, didn't "
+ "get DJI\n",(const BYTE*)GetControlPC(&(info->m_activeFrame.registers))));
+
+ return false; // Haven't a clue if we should continue, so
+ // don't
+ }
+ CorDebugMappingResult map = MAPPING_UNMAPPED_ADDRESS;
+ DWORD whichIDontCare;
+ ji->MapNativeOffsetToIL( nativeOffset, &map, &whichIDontCare);
+ unsigned int interestingMappings =
+ (map & ~(MAPPING_APPROXIMATE | MAPPING_EXACT));
+
+ LOG((LF_CORDB,LL_INFO10000,
+ "DeSt::ShContSt: interestingMappings:0x%x m_rgfMappingStop:%x\n",
+ interestingMappings,m_rgfMappingStop));
+
+ // If we're in a prolog,epilog, then we may want to skip
+ // over it or stop
+ if ( interestingMappings )
+ {
+ if ( interestingMappings & m_rgfMappingStop )
+ return false;
+ else
+ return true;
+ }
+ }
+ return false;
+}
+
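+// bool DebuggerStepper::IsRangeAppropriate()  Returns true if the stepper's cached native
+// ranges still apply to the frame described by info: either we are back in the frame in
+// which the step was started (or, under WIN64EXCEPTIONS, in that frame's parent method or a
+// sibling funclet), or we are in the method recorded for an in-flight exception
+// (m_fdException) at a frame no closer to the leaf than m_fpException.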
+bool DebuggerStepper::IsRangeAppropriate(ControllerStackInfo *info)
+{
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: info:0x%x \n", info));
+ if (m_range == NULL)
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: m_range == NULL, returning FALSE\n"));
+ return false;
+ }
+
+ FrameInfo *realFrame;
+
+#if defined(WIN64EXCEPTIONS)
+ bool fActiveFrameIsFunclet = info->m_activeFrame.IsNonFilterFuncletFrame();
+
+ if (fActiveFrameIsFunclet)
+ {
+ realFrame = &(info->m_returnFrame);
+ }
+ else
+#endif // WIN64EXCEPTIONS
+ {
+ realFrame = &(info->m_activeFrame);
+ }
+
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: info->m_activeFrame.fp:0x%x m_fp:0x%x\n", info->m_activeFrame.fp, m_fp));
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: m_fdException:0x%x realFrame->md:0x%x realFrame->fp:0x%x m_fpException:0x%x\n",
+ m_fdException, realFrame->md, realFrame->fp, m_fpException));
+ if ( (info->m_activeFrame.fp == m_fp) ||
+ ( (m_fdException != NULL) && (realFrame->md == m_fdException) &&
+ IsEqualOrCloserToRoot(realFrame->fp, m_fpException) ) )
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n"));
+ return true;
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ // There are two scenarios which make this function more complicated on WIN64.
+ // 1) We initiate a step in the parent method or a funclet but end up stepping into another funclet closer to the leaf.
+ // a) start in the parent method
+ // b) start in a funclet
+ // 2) We initiate a step in a funclet but end up stepping out to the parent method or a funclet closer to the root.
+ // a) end up in the parent method
+ // b) end up in a funclet
+ // In both cases the range of the stepper should still be appropriate.
+
+ bool fValidParentMethodFP = (m_fpParentMethod != LEAF_MOST_FRAME);
+
+ if (fActiveFrameIsFunclet)
+ {
+ // Scenario 1a
+ if (m_fp == info->m_returnFrame.fp)
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n"));
+ return true;
+ }
+ // Scenario 1b & 2b have the same condition
+ else if (fValidParentMethodFP && (m_fpParentMethod == info->m_returnFrame.fp))
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n"));
+ return true;
+ }
+ }
+ else
+ {
+ // Scenario 2a
+ if (fValidParentMethodFP && (m_fpParentMethod == info->m_activeFrame.fp))
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n"));
+ return true;
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+ LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning FALSE\n"));
+ return false;
+}
+
+// bool DebuggerStepper::IsInRange()   Given the native offset ip,
+// returns true if ip falls within any of the native offset ranges specified
+// by the array of COR_DEBUG_STEP_RANGEs, and false if it doesn't or if there
+// are no ranges (rangeCount==0).  Note that a COR_DEBUG_STEP_RANGE with an
+// endOffset of zero is interpreted as extending from startOffset to the end
+// of the method.
+// SIZE_T ip: Native offset, relative to the beginning of the method.
+// COR_DEBUG_STEP_RANGE *range: An array of ranges, which are themselves
+// native offsets, to compare against ip.
+// SIZE_T rangeCount: Number of elements in range
+bool DebuggerStepper::IsInRange(SIZE_T ip, COR_DEBUG_STEP_RANGE *range, SIZE_T rangeCount,
+ ControllerStackInfo *pInfo)
+{
+ LOG((LF_CORDB,LL_INFO10000,"DS::IIR: off=0x%x\n", ip));
+
+ if (range == NULL)
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::IIR: range == NULL -> not in range\n"));
+ return false;
+ }
+
+ if (pInfo && !IsRangeAppropriate(pInfo))
+ {
+        LOG((LF_CORDB,LL_INFO10000,"DS::IIR: range not appropriate -> not in range\n"));
+ return false;
+ }
+
+ COR_DEBUG_STEP_RANGE *r = range;
+ COR_DEBUG_STEP_RANGE *rEnd = r + rangeCount;
+
+ while (r < rEnd)
+ {
+ SIZE_T endOffset = r->endOffset ? r->endOffset : ~0;
+ LOG((LF_CORDB,LL_INFO100000,"DS::IIR: so=0x%x, eo=0x%x\n",
+ r->startOffset, endOffset));
+
+ if (ip >= r->startOffset && ip < endOffset)
+ {
+            LOG((LF_CORDB,LL_INFO1000,"DS::IIR:this:0x%x Found native offset "
+                 "0x%x to be in the range "
+                 "[0x%x, 0x%x), index 0x%x\n\n", this, ip, r->startOffset,
+                 endOffset, (r - range) ));
+ return true;
+ }
+
+ r++;
+ }
+
+ LOG((LF_CORDB,LL_INFO10000,"DS::IIR: not in range\n"));
+ return false;
+}
+
+// bool DebuggerStepper::DetectHandleInterceptors() Return true if
+// the current execution takes place within an interceptor (that is, either
+// the current frame, or the parent frame is a framed frame whose
+// GetInterception method returns something other than INTERCEPTION_NONE),
+// and this stepper doesn't want to stop in an interceptor, and we successfully
+// set a breakpoint after the top-most interceptor in the stack.
+bool DebuggerStepper::DetectHandleInterceptors(ControllerStackInfo *info)
+{
+ LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Start DetectHandleInterceptors\n"));
+ LOG((LF_CORDB,LL_INFO10000,"DS::DHI: active frame=0x%08x, has return frame=%d, return frame=0x%08x m_reason:%d\n",
+ info->m_activeFrame.frame, info->HasReturnFrame(), info->m_returnFrame.frame, m_reason));
+
+ // If this is a normal step, then we want to continue stepping, even if we
+ // are in an interceptor.
+ if (m_reason == STEP_NORMAL || m_reason == STEP_RETURN || m_reason == STEP_EXCEPTION_HANDLER)
+ {
+ LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Returning false while stepping within function, finally!\n"));
+ return false;
+ }
+
+ bool fAttemptStepOut = false;
+
+ if (m_rgfInterceptStop != INTERCEPT_ALL) // we may have to skip out of one
+ {
+ if (info->m_activeFrame.frame != NULL &&
+ info->m_activeFrame.frame != FRAME_TOP &&
+ info->m_activeFrame.frame->GetInterception() != Frame::INTERCEPTION_NONE)
+ {
+ if (!((CorDebugIntercept)info->m_activeFrame.frame->GetInterception() & Frame::Interception(m_rgfInterceptStop)))
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded frame type:0x%x\n",
+                     info->m_returnFrame.frame->GetInterception()));
+
+ fAttemptStepOut = true;
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::DHI: 0x%x set to STEP_INTERCEPT\n", this));
+
+ m_reason = STEP_INTERCEPT; //remember why we've stopped
+ }
+ }
+
+ if ((m_reason == STEP_EXCEPTION_FILTER) ||
+ (info->HasReturnFrame() &&
+ info->m_returnFrame.frame != NULL &&
+ info->m_returnFrame.frame != FRAME_TOP &&
+ info->m_returnFrame.frame->GetInterception() != Frame::INTERCEPTION_NONE))
+ {
+ if (m_reason == STEP_EXCEPTION_FILTER)
+ {
+ // Exceptions raised inside of the EE by COMPlusThrow, FCThrow, etc will not
+ // insert an ExceptionFrame, and hence info->m_returnFrame.frame->GetInterception()
+ // will not be accurate. Hence we use m_reason instead
+
+ if (!(Frame::INTERCEPTION_EXCEPTION & Frame::Interception(m_rgfInterceptStop)))
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded INTERCEPTION_EXCEPTION\n"));
+ fAttemptStepOut = true;
+ }
+ }
+ else if (!(info->m_returnFrame.frame->GetInterception() & Frame::Interception(m_rgfInterceptStop)))
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded return frame type:0x%x\n",
+ info->m_returnFrame.frame->GetInterception()));
+
+ fAttemptStepOut = true;
+ }
+
+ if (!fAttemptStepOut)
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n", this));
+
+ m_reason = STEP_INTERCEPT; //remember why we've stopped
+ }
+ }
+ else if (info->m_specialChainReason != CHAIN_NONE)
+ {
+ if(!(info->m_specialChainReason & CorDebugChainReason(m_rgfInterceptStop)) )
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DS::DHI: (special) Stepping out b/c of excluded return frame type:0x%x\n",
+ info->m_specialChainReason));
+
+ fAttemptStepOut = true;
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n", this));
+
+ m_reason = STEP_INTERCEPT; //remember why we've stopped
+ }
+ }
+ else if (info->m_activeFrame.frame == NULL)
+ {
+ // Make sure we are not dealing with a chain here.
+ if (info->m_activeFrame.HasMethodFrame())
+ {
+ // Check whether we are executing in a class constructor.
+ _ASSERTE(info->m_activeFrame.md != NULL);
+ if (info->m_activeFrame.md->IsClassConstructor())
+ {
+ // We are in a class constructor. Check whether we want to stop in it.
+ if (!(CHAIN_CLASS_INIT & CorDebugChainReason(m_rgfInterceptStop)))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS::DHI: Stepping out b/c of excluded cctor:0x%x\n",
+ CHAIN_CLASS_INIT));
+
+ fAttemptStepOut = true;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n", this));
+
+ m_reason = STEP_INTERCEPT; //remember why we've stopped
+ }
+ }
+ }
+ }
+ }
+
+ if (fAttemptStepOut)
+ {
+ LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Doing TSO!\n"));
+
+        // TrapStepOut could alter the step reason if we're stepping out of an interceptor and it looks like we're
+ // running off the top of the program. So hold onto it here, and if our step reason becomes STEP_EXIT, then
+ // reset it to what it was.
+ CorDebugStepReason holdReason = m_reason;
+
+ // @todo - should this be TrapStepNext??? But that may stop in a child...
+ TrapStepOut(info);
+ EnableUnwind(m_fp);
+
+ if (m_reason == STEP_EXIT)
+ {
+ m_reason = holdReason;
+ }
+
+ return true;
+ }
+
+ // We're not in a special area of code, so we don't want to continue unless some other part of the code decides that
+ // we should.
+ LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Returning false, finally!\n"));
+
+ return false;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// This function checks whether the given IP is in an LCG method. If so, it enables
+// JMC and does a step out. This effectively makes sure that we never stop in an LCG method.
+//
+// There are two common scenarios here:
+// 1) We single-step into an LCG method from a managed method.
+// 2) We single-step off the end of a method called by an LCG method and end up in the calling LCG method.
+//
+// In both cases, we don't want to stop in the LCG method. If the LCG method directly or indirectly calls
+// another user method, we want to stop there. Otherwise, we just want to step out back to the caller of
+// LCG method. In other words, what we want is exactly the JMC behaviour.
+//
+// Arguments:
+// ip - the current IP where the thread is stopped at
+// pMD - This is the MethodDesc for the specified ip. This can be NULL, but if it's not,
+// then it has to match the specified IP.
+// pInfo - the ControllerStackInfo taken at the specified IP (see Notes below)
+//
+// Return Value:
+// Returns TRUE if the specified IP is indeed in an LCG method, in which case this function has already
+// enabled all the traps to catch the thread, including turning on JMC, enabling unwind callback, and
+// putting a patch in the caller.
+//
+// Notes:
+// LCG methods don't show up in stackwalks done by the ControllerStackInfo. So even if the specified IP
+//    is in an LCG method, the LCG method won't show up in the call stack.  That's why we need to call
+// ControllerStackInfo::SetReturnFrameWithActiveFrame() in this function before calling TrapStepOut().
+// Otherwise TrapStepOut() will put a patch in the caller's caller (if there is one).
+//
+
+BOOL DebuggerStepper::DetectHandleLCGMethods(const PCODE ip, MethodDesc * pMD, ControllerStackInfo * pInfo)
+{
+ // Look up the MethodDesc for the given IP.
+ if (pMD == NULL)
+ {
+ if (g_pEEInterface->IsManagedNativeCode((const BYTE *)ip))
+ {
+ pMD = g_pEEInterface->GetNativeCodeMethodDesc(ip);
+ _ASSERTE(pMD != NULL);
+ }
+ }
+#if defined(_DEBUG)
+ else
+ {
+ // If a MethodDesc is specified, it has to match the given IP.
+ _ASSERTE(pMD == g_pEEInterface->GetNativeCodeMethodDesc(ip));
+ }
+#endif // _DEBUG
+
+ // If the given IP is in unmanaged code, then we won't have a MethodDesc by this point.
+ if (pMD != NULL)
+ {
+ if (pMD->IsLCGMethod())
+ {
+ // Enable all the traps to catch the thread.
+ EnableUnwind(m_fp);
+ EnableJMCBackStop(pMD);
+
+ pInfo->SetReturnFrameWithActiveFrame();
+ TrapStepOut(pInfo);
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+
+
+// Steppers override these so that they can skip func-evals. Note that steppers can
+// be created & used inside of func-evals (nested-break states).
+// On enter, we check for freezing the stepper.
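+// The func-eval nesting count drives a simple freeze/thaw protocol, e.g.:
+//   stepper created outside any func-eval        : m_cFuncEvalNesting == 0
+//   TriggerFuncEvalEnter (first eval)            : nesting -> 1; the Method-Enter trigger (the
+//                                                  only one handled today) is recorded in
+//                                                  m_bvFrozenTriggers and disabled (freeze)
+//   TriggerFuncEvalEnter (nested eval)           : nesting -> 2; nothing further to do
+//   TriggerFuncEvalExit  (nested eval finishes)  : nesting -> 1
+//   TriggerFuncEvalExit  (first eval finishes)   : nesting -> 0; recorded triggers re-enabled (thaw)
+// A stepper created inside a func-eval whose session then exits reaches nesting == -1 and
+// disables itself entirely (see TriggerFuncEvalExit).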
+void DebuggerStepper::TriggerFuncEvalEnter(Thread * thread)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DS::TFEEnter, this=0x%p, old nest=%d\n", this, m_cFuncEvalNesting));
+
+ // Since this is always called on the hijacking thread, we should be thread-safe
+ _ASSERTE(thread == this->GetThread());
+
+ if (IsDead())
+ return;
+
+ m_cFuncEvalNesting++;
+
+ if (m_cFuncEvalNesting == 1)
+ {
+ // We're entering our 1st funceval, so freeze us.
+ LOG((LF_CORDB, LL_INFO100000, "DS::TFEEnter - freezing stepper\n"));
+
+ // Freeze the stepper by disabling all triggers
+ m_bvFrozenTriggers = 0;
+
+ //
+        // We don't explicitly disable single-stepping because the OS
+ // gives us a new thread context during an exception. Since
+ // all func-evals are done inside exceptions, we should never
+ // have this problem.
+ //
+ // Note: however, that if func-evals were no longer done in
+ // exceptions, this would have to change.
+ //
+
+
+ if (IsMethodEnterEnabled())
+ {
+ m_bvFrozenTriggers |= kMethodEnter;
+ DisableMethodEnter();
+ }
+
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DS::TFEEnter - new nest=%d\n", m_cFuncEvalNesting));
+ }
+}
+
+// On Func-EvalExit, we check if the stepper is trying to step-out of a func-eval
+// (in which case we kill it)
+// or if we previously entered this func-eval and should thaw it now.
+void DebuggerStepper::TriggerFuncEvalExit(Thread * thread)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DS::TFEExit, this=0x%p, old nest=%d\n", this, m_cFuncEvalNesting));
+
+ // Since this is always called on the hijacking thread, we should be thread-safe
+ _ASSERTE(thread == this->GetThread());
+
+ if (IsDead())
+ return;
+
+ m_cFuncEvalNesting--;
+
+ if (m_cFuncEvalNesting == -1)
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - disabling stepper\n"));
+
+ // we're exiting the func-eval session we were created in. So we just completely
+ // disable ourselves so that we don't fire anything anymore.
+ // The RS still has to free the stepper though.
+
+ // This prevents us from stepping-out of a func-eval. For traditional steppers,
+ // this is overkill since it won't have any outstanding triggers. (trap-step-out
+ // won't patch if it crosses a func-eval frame).
+ // But JMC-steppers have Method-Enter; and so this is the only place we have to
+ // disable that.
+ DisableAll();
+ }
+ else if (m_cFuncEvalNesting == 0)
+ {
+ // We're back to our starting Func-eval session, we should have been frozen,
+ // so now we thaw.
+ LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - thawing stepper\n"));
+
+ // Thaw the stepper (reenable triggers)
+ if ((m_bvFrozenTriggers & kMethodEnter) != 0)
+ {
+ EnableMethodEnter();
+ }
+ m_bvFrozenTriggers = 0;
+
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - new nest=%d\n", m_cFuncEvalNesting));
+ }
+}
+
+
+// Return true iff we set a patch (which implies to caller that we should
+// let controller run free and hit that patch)
+bool DebuggerStepper::TrapStepInto(ControllerStackInfo *info,
+ const BYTE *ip,
+ TraceDestination *pTD)
+{
+ _ASSERTE( pTD != NULL );
+ _ASSERTE(this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER);
+
+ EnableTraceCall(LEAF_MOST_FRAME);
+ if (IsCloserToRoot(info->m_activeFrame.fp, m_fpStepInto))
+ m_fpStepInto = info->m_activeFrame.fp;
+
+ LOG((LF_CORDB, LL_INFO1000, "Ds::TSI this:0x%x m_fpStepInto:0x%x\n",
+ this, m_fpStepInto.GetSPValue()));
+
+ TraceDestination trace;
+
+ // Trace through the stubs.
+ // If we're calling from managed code, this should either succeed
+ // or become an ecall into mscorwks.
+ // @Todo - what about stubs in mscorwks.
+    // @todo - if this fails, we want to provide as much info as possible.
+ if (!g_pEEInterface->TraceStub(ip, &trace)
+ || !g_pEEInterface->FollowTrace(&trace))
+ {
+ return false;
+ }
+
+
+ (*pTD) = trace; //bitwise copy
+
+ // Step-in always operates at the leaf-most frame. Thus the frame pointer for any
+ // patch for step-in should be LEAF_MOST_FRAME, regardless of whatever our current fp
+ // is before the step-in.
+ // Note that step-in may skip 'internal' frames (FrameInfo w/ internal=true) since
+ // such frames may really just be a marker for an internal EE Frame on the stack.
+ // However, step-out uses these frames b/c it may call frame->TraceFrame() on them.
+ return PatchTrace(&trace,
+ LEAF_MOST_FRAME, // step-in is always leaf-most frame.
+ (m_rgfMappingStop&STOP_UNMANAGED)?(true):(false));
+}
+
+// Enable the JMC backstop for stepping on Step-In.
+// This activates the JMC probes, which will provide a safety net
+// to stop a stepper if the StubManagers don't predict the call properly.
+// Ideally, this should never be necessary (because the SMs would do their job).
+void DebuggerStepper::EnableJMCBackStop(MethodDesc * pStartMethod)
+{
+ // JMC steppers should not need the JMC backstop unless a thread inadvertently stops in an LCG method.
+ //_ASSERTE(DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType());
+
+ // Since we should never hit the JMC backstop (since it's really a SM issue), we'll assert if we actually do.
+ // However, there's 1 corner case here. If we trace calls at the start of the method before the JMC-probe,
+ // then we'll still hit the JMC backstop in our own method.
+ // Record that starting method. That way, if we end up hitting our JMC backstop in our own method,
+    // we don't over-aggressively fire the assert. (This won't work for recursive cases, but since this is just
+ // changing an assert, we don't care).
+
+#ifdef _DEBUG
+ // May be NULL if we didn't start in a method.
+ m_StepInStartMethod = pStartMethod;
+#endif
+
+ // We don't want traditional steppers to rely on MethodEnter (b/c it's not guaranteed to be correct),
+ // but it may be a useful last resort.
+ this->EnableMethodEnter();
+}
+
+// Return true if the stepper can run free.
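+// If TrapStepInto manages to place a patch at the call target, we record STEP_CALL where
+// appropriate and return true so the thread can run to that patch.  Otherwise we enable the
+// JMC backstop and return false, letting the caller (TrapStep) patch the instruction
+// following the call instead.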
+bool DebuggerStepper::TrapStepInHelper(
+ ControllerStackInfo * pInfo,
+ const BYTE * ipCallTarget,
+ const BYTE * ipNext,
+ bool fCallingIntoFunclet)
+{
+ TraceDestination td;
+
+#ifdef _DEBUG
+ // Begin logging the step-in activity in debug builds.
+ StubManager::DbgBeginLog((TADDR) ipNext, (TADDR) ipCallTarget);
+#endif
+
+
+ if (TrapStepInto(pInfo, ipCallTarget, &td))
+ {
+ // If we placed a patch, see if we need to update our step-reason
+ if (td.GetTraceType() == TRACE_MANAGED )
+ {
+ // Possible optimization: Roll all of g_pEEInterface calls into
+ // one function so we don't repeatedly get the CodeMan,etc
+ MethodDesc *md = NULL;
+ _ASSERTE( g_pEEInterface->IsManagedNativeCode((const BYTE *)td.GetAddress()) );
+ md = g_pEEInterface->GetNativeCodeMethodDesc(td.GetAddress());
+
+ if ( g_pEEInterface->GetFunctionAddress(md) == td.GetAddress())
+ {
+
+ LOG((LF_CORDB,LL_INFO1000,"\tDS::TS 0x%x m_reason = STEP_CALL"
+ "@ip0x%x\n", this, (BYTE*)GetControlPC(&(pInfo->m_activeFrame.registers))));
+ m_reason = STEP_CALL;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000, "Didn't step: md:0x%x"
+ "td.type:%s td.address:0x%x, gfa:0x%x\n",
+ md, GetTType(td.GetTraceType()), td.GetAddress(),
+ g_pEEInterface->GetFunctionAddress(md)));
+ }
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::TS else 0x%x m_reason = STEP_CALL\n",
+ this));
+ m_reason = STEP_CALL;
+ }
+
+
+ return true;
+ } // end TrapStepIn
+ else
+ {
+ // If we can't figure out where the stepper should call into (likely because we can't find a stub-manager),
+ // then enable the JMC backstop.
+ EnableJMCBackStop(pInfo->m_activeFrame.md);
+
+ }
+
+ // We ignore ipNext here. Instead we'll return false and let the caller (TrapStep)
+ // set the patch for us.
+ return false;
+}
+
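+// Small helper: returns true when the call/jump target is the tail-call dispatch stub, i.e.
+// the jitted code is about to perform a tail call rather than an ordinary call.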
+FORCEINLINE bool IsTailCall(const BYTE * pTargetIP)
+{
+ return TailCallStubManager::IsTailCallStubHelper(reinterpret_cast<PCODE>(pTargetIP));
+}
+
+// bool DebuggerStepper::TrapStep()   TrapStep attempts to set a
+// patch at the next IL instruction to be executed. If we're stepping in &
+// the next IL instruction is a call, then this'll set a breakpoint inside
+// the code that will be called.
+// How: There are a number of cases, depending on where the IP
+// currently is:
+// Unmanaged code: EnableTraceCall() & return false - try and get
+// it when it returns.
+//      In a frame: if the "in" param is true, then do an
+// EnableTraceCall(). If the frame isn't the top frame, also do
+// g_pEEInterface->TraceFrame(), g_pEEInterface->FollowTrace, and
+// PatchTrace.
+// Normal managed frame: create a Walker and walk the instructions until either
+// leave the provided range (AddPatch there, return true), or we don't know what the
+// next instruction is (say, after a call, or return, or branch - return false).
+// Returns a boolean indicating if we were able to set a patch successfully
+// in either this method, or (if in == true & the next instruction is a call)
+// inside a callee method.
+// true: Patch successfully placed either in this method or a callee,
+// so the stepping is taken care of.
+// false: Unable to place patch in either this method or any
+// applicable callee methods, so the only option the caller has to put
+// patch to control flow is to call TrapStepOut & try and place a patch
+// on the method that called the current frame's method.
+bool DebuggerStepper::TrapStep(ControllerStackInfo *info, bool in)
+{
+ LOG((LF_CORDB,LL_INFO10000,"DS::TS: this:0x%x\n", this));
+ if (!info->m_activeFrame.managed)
+ {
+ //
+ // We're not in managed code. Patch up all paths back in.
+ //
+
+ LOG((LF_CORDB,LL_INFO10000, "DS::TS: not in managed code\n"));
+
+ if (in)
+ {
+ EnablePolyTraceCall();
+ }
+
+ return false;
+ }
+
+ if (info->m_activeFrame.frame != NULL)
+ {
+
+ //
+ // We're in some kind of weird frame. Patch further entry to the frame.
+ // or if we can't, patch return from the frame
+ //
+
+ LOG((LF_CORDB,LL_INFO10000, "DS::TS: in a weird frame\n"));
+
+ if (in)
+ {
+ EnablePolyTraceCall();
+
+ // Only traditional steppers should patch a frame. JMC steppers will
+ // just rely on TriggerMethodEnter.
+ if (DEBUGGER_CONTROLLER_STEPPER == this->GetDCType())
+ {
+ if (info->m_activeFrame.frame != FRAME_TOP)
+ {
+ TraceDestination trace;
+
+ CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
+
+ // This could be anywhere, especially b/c step could be on non-leaf frame.
+ if (g_pEEInterface->TraceFrame(this->GetThread(),
+ info->m_activeFrame.frame,
+ FALSE, &trace,
+ &(info->m_activeFrame.registers))
+ && g_pEEInterface->FollowTrace(&trace)
+ && PatchTrace(&trace, info->m_activeFrame.fp,
+ (m_rgfMappingStop&STOP_UNMANAGED)?
+ (true):(false)))
+
+ {
+ return true;
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+#ifdef _TARGET_X86_
+ LOG((LF_CORDB,LL_INFO1000, "GetJitInfo for pc = 0x%x (addr of "
+ "that value:0x%x)\n", (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)),
+ info->m_activeFrame.registers.PCTAddr));
+#endif
+
+ // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value in that, and
+ // it was causing problems creating a stepper while sitting in ndirect stubs after we'd returned from the unmanaged
+ // function that had been called.
+ DebuggerJitInfo *ji = info->m_activeFrame.GetJitInfoFromFrame();
+ if( ji != NULL )
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::TS: For code 0x%p, got DJI 0x%p, "
+ "from 0x%p to 0x%p\n",
+ (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)),
+ ji, ji->m_addrOfCode, ji->m_addrOfCode+ji->m_sizeOfCode));
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::TS: For code 0x%p, "
+ "didn't get a DJI \n",
+ (const BYTE*)(GetControlPC(&info->m_activeFrame.registers))));
+ }
+
+ //
+ // We're in a normal managed frame - walk the code
+ //
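+    // The NativeWalker decodes one instruction at a time starting at the current control PC.
+    // The code below uses it to classify each instruction (return / call / branch / other) and
+    // to decide whether to single-step, skip over a call, or drop a patch and let the thread run.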
+
+ NativeWalker walker;
+
+ LOG((LF_CORDB,LL_INFO1000, "DS::TS: &info->m_activeFrame.registers 0x%p\n", &info->m_activeFrame.registers));
+
+ // !!! Eventually when using the fjit, we'll want
+ // to walk the IL to get the next location, & then map
+ // it back to native.
+ walker.Init((BYTE*)GetControlPC(&(info->m_activeFrame.registers)), &info->m_activeFrame.registers);
+
+
+ // Is the active frame really the active frame?
+ // What if the thread is stopped at a managed debug event outside of a filter ctx? Eg, stopped
+ // somewhere directly in mscorwks (like sending a LogMsg or ClsLoad event) or even at WaitForSingleObject.
+ // ActiveFrame is either the stepper's initial frame or the frame of a filterctx.
+ bool fIsActivFrameLive = (info->m_activeFrame.fp == info->m_bottomFP);
+
+ // If this thread isn't stopped in managed code, it can't be at the active frame.
+ if (GetManagedStoppedCtx(this->GetThread()) == NULL)
+ {
+ fIsActivFrameLive = false;
+ }
+
+ bool fIsJump = false;
+ bool fCallingIntoFunclet = false;
+
+ // If m_activeFrame is not the actual active frame,
+ // we should skip this first switch - never single step, and
+ // assume our context is bogus.
+ if (fIsActivFrameLive)
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DC::TS: immediate?\n"));
+
+ // Note that by definition our walker must always be able to step
+ // through a single instruction, so any return
+ // of NULL IP's from those cases on the first step
+ // means that an exception is going to be generated.
+ //
+ // (On future steps, it can also mean that the destination
+ // simply can't be computed.)
+ WALK_TYPE wt = walker.GetOpcodeWalkType();
+ {
+ switch (wt)
+ {
+ case WALK_RETURN:
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_RETURN\n"));
+
+ // Normally a 'ret' opcode means we're at the end of a function and doing a step-out.
+ // But the jit is free to use a 'ret' opcode to implement various goofy constructs like
+ // managed filters, in which case we may ret to the same function or we may ret to some
+ // internal CLR stub code.
+ // So we'll just ignore this and tell the Stepper to enable every notification it has
+ // and let the thread run free. This will include TrapStepOut() and EnableUnwind()
+ // to catch any potential filters.
+
+
+ // Go ahead and enable the single-step flag too. We know it's safe.
+ // If this lands in random code, then TriggerSingleStep will just ignore it.
+ EnableSingleStep();
+
+ // Don't set step-reason yet. If another trigger gets hit, it will set the reason.
+ return false;
+ }
+
+ case WALK_BRANCH:
+ LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_BRANCH\n"));
+                // A branch can be handled just like a call.  If the branch is within the current method, then we just fall
+                // down to WALK_UNKNOWN, otherwise we handle it just like a call.  Note: we need to force in=true
+ // because for a jmp, in or over is the same thing, we're still going there, and the in==true case is
+ // the case we want to use...
+ fIsJump = true;
+
+ // fall through...
+
+ case WALK_CALL:
+ LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_CALL ip=%p nextip=%p\n", walker.GetIP(), walker.GetNextIP()));
+
+ // If we're doing some sort of intra-method jump (usually, to get EIP in a clever way, via the CALL
+ // instruction), then put the bp where we're going, NOT at the instruction following the call
+ if (IsAddrWithinFrame(ji, info->m_activeFrame.md, walker.GetIP(), walker.GetNextIP()))
+ {
+ LOG((LF_CORDB, LL_INFO1000, "Walk call within method!" ));
+ goto LWALK_UNKNOWN;
+ }
+
+ if (walker.GetNextIP() != NULL)
+ {
+#ifdef WIN64EXCEPTIONS
+ // There are 4 places we could be jumping:
+ // 1) to the beginning of the same method (recursive call)
+ // 2) somewhere in the same funclet, that isn't the method start
+ // 3) somewhere in the same method but different funclet
+ // 4) somewhere in a different method
+ //
+ // IsAddrWithinFrame ruled out option 2, IsAddrWithinMethodIncludingFunclet rules out option 4,
+                    // IsAddrWithinFrame ruled out option 2, IsAddrWithinMethodIncludingFunclet rules out option 4,
+                    // and checking the IP against the start address rules out option 1.  That leaves only the
+                    // option we wanted, option #3.
+ fCallingIntoFunclet = IsAddrWithinMethodIncludingFunclet(ji, info->m_activeFrame.md, walker.GetNextIP()) &&
+ ((CORDB_ADDRESS)(SIZE_T)walker.GetNextIP() != ji->m_addrOfCode);
+#endif
+ // At this point, we know that the call/branch target is not in the current method.
+ // So if the current instruction is a jump, this must be a tail call or possibly a jump to the finally.
+ // So, check if the call/branch target is the JIT helper for handling tail calls if we are not calling
+ // into the funclet.
+ if ((fIsJump && !fCallingIntoFunclet) || IsTailCall(walker.GetNextIP()))
+ {
+ // A step-over becomes a step-out for a tail call.
+ if (!in)
+ {
+ TrapStepOut(info);
+ return true;
+ }
+ }
+
+ // To preserve the old behaviour, if this is not a tail call, then we assume we want to
+ // follow the call/jump.
+ if (fIsJump)
+ {
+ in = true;
+ }
+
+
+ // There are two cases where we need to perform a step-in. One, if the step operation is
+ // a step-in. Two, if the target address of the call is in a funclet of the current method.
+ // In this case, we want to step into the funclet even if the step operation is a step-over.
+ if (in || fCallingIntoFunclet)
+ {
+ if (TrapStepInHelper(info, walker.GetNextIP(), walker.GetSkipIP(), fCallingIntoFunclet))
+ {
+ return true;
+ }
+ }
+
+ }
+ if (walker.GetSkipIP() == NULL)
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS::TS 0x%x m_reason = STEP_CALL (skip)\n",
+ this));
+ m_reason = STEP_CALL;
+
+ return true;
+ }
+
+
+ LOG((LF_CORDB,LL_INFO100000, "DC::TS:Imm:WALK_CALL Skip instruction\n"));
+ walker.Skip();
+ break;
+
+ case WALK_UNKNOWN:
+ LWALK_UNKNOWN:
+ LOG((LF_CORDB,LL_INFO10000,"DS::TS:WALK_UNKNOWN - curIP:0x%x "
+ "nextIP:0x%x skipIP:0x%x 1st byte of opcode:0x%x\n", (BYTE*)GetControlPC(&(info->m_activeFrame.
+ registers)), walker.GetNextIP(),walker.GetSkipIP(),
+ *(BYTE*)GetControlPC(&(info->m_activeFrame.registers))));
+
+ EnableSingleStep();
+
+ return true;
+
+ default:
+ if (walker.GetNextIP() == NULL)
+ {
+ return true;
+ }
+
+ walker.Next();
+ }
+ }
+ } // if (fIsActivFrameLive)
+
+ //
+ // Use our range, if we're in the original
+ // frame.
+ //
+
+ COR_DEBUG_STEP_RANGE *range;
+ SIZE_T rangeCount;
+
+ if (info->m_activeFrame.fp == m_fp)
+ {
+ range = m_range;
+ rangeCount = m_rangeCount;
+ }
+ else
+ {
+ range = NULL;
+ rangeCount = 0;
+ }
+
+ //
+ // Keep walking until either we're out of range, or
+ // else we can't predict ahead any more.
+ //
+
+ while (TRUE)
+ {
+ const BYTE *ip = walker.GetIP();
+
+ SIZE_T offset = CodeRegionInfo::GetCodeRegionInfo(ji, info->m_activeFrame.md).AddressToOffset(ip);
+
+ LOG((LF_CORDB, LL_INFO1000, "Walking to ip 0x%x (natOff:0x%x)\n",ip,offset));
+
+ if (!IsInRange(offset, range, rangeCount)
+ && !ShouldContinueStep( info, offset ))
+ {
+ AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
+ ji,
+ offset,
+ info->m_returnFrame.fp,
+ NULL);
+ return true;
+ }
+
+ switch (walker.GetOpcodeWalkType())
+ {
+ case WALK_RETURN:
+
+ LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_RETURN Adding Patch.\n"));
+
+ // In the loop above, if we're at the return address, we'll check & see
+ // if we're returning to elsewhere within the same method, and if so,
+ // we'll single step rather than TrapStepOut. If we see a return in the
+ // code stream, then we'll set a breakpoint there, so that we can
+ // examine the return address, and decide whether to SS or TSO then
+ AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
+ ji,
+ offset,
+ info->m_returnFrame.fp,
+ NULL);
+ return true;
+
+ case WALK_CALL:
+
+ LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL.\n"));
+
+ // If we're doing some sort of intra-method jump (usually, to get EIP in a clever way, via the CALL
+ // instruction), then put the bp where we're going, NOT at the instruction following the call
+ if (IsAddrWithinFrame(ji, info->m_activeFrame.md, walker.GetIP(), walker.GetNextIP()))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL IsAddrWithinFrame, Adding Patch.\n"));
+
+ // How else to detect this?
+ AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
+ ji,
+ CodeRegionInfo::GetCodeRegionInfo(ji, info->m_activeFrame.md).AddressToOffset(walker.GetNextIP()),
+ info->m_returnFrame.fp,
+ NULL);
+ return true;
+ }
+
+ if (IsTailCall(walker.GetNextIP()))
+ {
+ if (!in)
+ {
+ AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
+ ji,
+ offset,
+ info->m_returnFrame.fp,
+ NULL);
+ return true;
+ }
+ }
+
+#ifdef WIN64EXCEPTIONS
+ fCallingIntoFunclet = IsAddrWithinMethodIncludingFunclet(ji, info->m_activeFrame.md, walker.GetNextIP());
+#endif
+ if (in || fCallingIntoFunclet)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL step in is true\n"));
+ if (walker.GetNextIP() == NULL)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL NextIP == NULL\n"));
+ AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
+ ji,
+ offset,
+ info->m_returnFrame.fp,
+ NULL);
+
+ LOG((LF_CORDB,LL_INFO10000,"DS0x%x m_reason=STEP_CALL 2\n",
+ this));
+ m_reason = STEP_CALL;
+
+ return true;
+ }
+
+ if (TrapStepInHelper(info, walker.GetNextIP(), walker.GetSkipIP(), fCallingIntoFunclet))
+ {
+ return true;
+ }
+
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL Calling GetSkipIP\n"));
+ if (walker.GetSkipIP() == NULL)
+ {
+ AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
+ ji,
+ offset,
+ info->m_returnFrame.fp,
+ NULL);
+
+ LOG((LF_CORDB,LL_INFO10000,"DS 0x%x m_reason=STEP_CALL4\n",this));
+ m_reason = STEP_CALL;
+
+ return true;
+ }
+
+ walker.Skip();
+ LOG((LF_CORDB, LL_INFO10000, "DS::TS: skipping over call.\n"));
+ break;
+
+ default:
+ if (walker.GetNextIP() == NULL)
+ {
+ AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
+ ji,
+ offset,
+ info->m_returnFrame.fp,
+ NULL);
+ return true;
+ }
+ walker.Next();
+ break;
+ }
+ }
+ LOG((LF_CORDB,LL_INFO1000,"Ending TrapStep\n"));
+}
+
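+// bool DebuggerStepper::IsAddrWithinFrame()  Returns true only if targetAddr lies within the
+// same method as currentAddr (and, under WIN64EXCEPTIONS, within the same funclet) and is not
+// the method's entry point - a jump to the entry point is treated as a recursive call rather
+// than an intra-method jump.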
+bool DebuggerStepper::IsAddrWithinFrame(DebuggerJitInfo *dji,
+ MethodDesc* pMD,
+ const BYTE* currentAddr,
+ const BYTE* targetAddr)
+{
+ _ASSERTE(dji != NULL);
+
+ bool result = IsAddrWithinMethodIncludingFunclet(dji, pMD, targetAddr);
+
+ // We need to check if this is a recursive call. In RTM we should see if this method is really necessary,
+ // since it looks like the X86 JIT doesn't emit intra-method jumps anymore.
+ if (result)
+ {
+ if ((CORDB_ADDRESS)(SIZE_T)targetAddr == dji->m_addrOfCode)
+ {
+ result = false;
+ }
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ // On WIN64, we also check whether the targetAddr and the currentAddr is in the same funclet.
+ _ASSERTE(currentAddr != NULL);
+ if (result)
+ {
+ int currentFuncletIndex = dji->GetFuncletIndex((CORDB_ADDRESS)currentAddr, DebuggerJitInfo::GFIM_BYADDRESS);
+ int targetFuncletIndex = dji->GetFuncletIndex((CORDB_ADDRESS)targetAddr, DebuggerJitInfo::GFIM_BYADDRESS);
+ result = (currentFuncletIndex == targetFuncletIndex);
+ }
+#endif // WIN64EXCEPTIONS
+
+ return result;
+}
+
+// x86 shouldn't need to call this method directly. We should call IsAddrWithinFrame() on x86 instead.
+// That's why I use a name with the word "funclet" in it to scare people off.
+bool DebuggerStepper::IsAddrWithinMethodIncludingFunclet(DebuggerJitInfo *dji,
+ MethodDesc* pMD,
+ const BYTE* targetAddr)
+{
+ _ASSERTE(dji != NULL);
+ return CodeRegionInfo::GetCodeRegionInfo(dji, pMD).IsMethodAddress(targetAddr);
+}
+
+void DebuggerStepper::TrapStepNext(ControllerStackInfo *info)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DS::TrapStepNext, this=%p\n", this));
+ // StepNext for a Normal stepper is just a step-out
+ TrapStepOut(info);
+
+ // @todo -should we also EnableTraceCall??
+}
+
+// Is this frame interesting?
+// For a traditional stepper, all frames are interesting.
+bool DebuggerStepper::IsInterestingFrame(FrameInfo * pFrame)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return true;
+}
+
+// Place a single patch somewhere up the stack to do a step-out
+void DebuggerStepper::TrapStepOut(ControllerStackInfo *info, bool fForceTraditional)
+{
+ ControllerStackInfo returnInfo;
+ DebuggerJitInfo *dji;
+
+ LOG((LF_CORDB, LL_INFO10000, "DS::TSO this:0x%p\n", this));
+
+ bool fReturningFromFinallyFunclet = false;
+
+#if defined(WIN64EXCEPTIONS)
+ // When we step out of a funclet, we should do one of two things, depending
+ // on the original stepping intention:
+ // 1) If we originally want to step out, then we should skip the parent method.
+ // 2) If we originally want to step in/over but we step off the end of the funclet,
+ // then we should resume in the parent, if possible.
+ if (info->m_activeFrame.IsNonFilterFuncletFrame())
+ {
+ // There should always be a frame for the parent method.
+ _ASSERTE(info->HasReturnFrame());
+
+#ifdef _TARGET_ARM_
+ while (info->HasReturnFrame() && info->m_activeFrame.md != info->m_returnFrame.md)
+ {
+ StackTraceTicket ticket(info);
+ returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL);
+ info = &returnInfo;
+ }
+
+ _ASSERTE(info->HasReturnFrame());
+#endif
+
+ _ASSERTE(info->m_activeFrame.md == info->m_returnFrame.md);
+
+ if (m_eMode == cStepOut)
+ {
+ StackTraceTicket ticket(info);
+ returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL);
+ info = &returnInfo;
+ }
+ else
+ {
+ _ASSERTE(info->m_returnFrame.managed);
+ _ASSERTE(info->m_returnFrame.frame == NULL);
+
+ MethodDesc *md = info->m_returnFrame.md;
+ dji = info->m_returnFrame.GetJitInfoFromFrame();
+
+ // The return value of a catch funclet is the control PC to resume to.
+ // The return value of a finally funclet has no meaning, so we need to check
+ // if the return value is in the main method.
+ LPVOID resumePC = GetRegdisplayReturnValue(&(info->m_activeFrame.registers));
+
+ // For finally funclet, there are two possible situations. Either the finally is
+ // called normally (i.e. no exception), in which case we simply fall through and
+ // let the normal loop do its work below, or the finally is called by the EH
+ // routines, in which case we need the unwind notification.
+ if (IsAddrWithinMethodIncludingFunclet(dji, md, (const BYTE *)resumePC))
+ {
+ SIZE_T reloffset = dji->m_codeRegionInfo.AddressToOffset((BYTE*)resumePC);
+
+ AddBindAndActivateNativeManagedPatch(info->m_returnFrame.md,
+ dji,
+ reloffset,
+ info->m_returnFrame.fp,
+ NULL);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO:normally managed code AddPatch"
+ " in %s::%s, offset 0x%x, m_reason=%d\n",
+ info->m_returnFrame.md->m_pszDebugClassName,
+ info->m_returnFrame.md->m_pszDebugMethodName,
+ reloffset, m_reason));
+
+ // Do not set m_reason to STEP_RETURN here. Logically, the funclet and the parent method are the
+ // same method, so we should not "return" to the parent method.
+ LOG((LF_CORDB, LL_INFO10000,"DS::TSO: done\n"));
+
+ return;
+ }
+ else
+ {
+ // This is the case where we step off the end of a finally funclet.
+ fReturningFromFinallyFunclet = true;
+ }
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+#ifdef _DEBUG
+ FramePointer dbgLastFP; // for debug, make sure we're making progress through the stack.
+#endif
+
+ while (info->HasReturnFrame())
+ {
+
+#ifdef _DEBUG
+ dbgLastFP = info->m_activeFrame.fp;
+#endif
+
+ // Continue walking up the stack & set a patch upon the next
+ // frame up. We will eventually either hit managed code
+ // (which we can set a definite patch in), or the top of the
+ // stack.
+ StackTraceTicket ticket(info);
+
+        // The last parameter here is part of a really targeted (*cough* dirty) fix to
+ // disable getting an unwanted UMChain to fix issue 650903 (See
+ // code:ControllerStackInfo::WalkStack and code:TrackUMChain for the other
+ // parts.) In the case of managed step out we know that we aren't interested in
+ // unmanaged frames, and generating that unmanaged frame causes the stackwalker
+ // not to report the managed frame that was at the same SP. However the unmanaged
+ // frame might be used in the mixed-mode step out case so I don't suppress it
+ // there.
+ returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL, !(m_rgfMappingStop & STOP_UNMANAGED));
+ info = &returnInfo;
+
+#ifdef _DEBUG
+ // If this assert fires, then it means that we're not making progress while
+        // tracing up towards the root of the stack. Likely an issue in the Left-Side's
+ // stackwalker.
+ _ASSERTE(IsCloserToLeaf(dbgLastFP, info->m_activeFrame.fp));
+#endif
+
+#ifdef FEATURE_STUBS_AS_IL
+ if (info->m_activeFrame.md->IsILStub() && info->m_activeFrame.md->AsDynamicMethodDesc()->IsMulticastStub())
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO: multicast frame.\n"));
+
+ // User break should always be called from managed code, so it should never actually hit this codepath.
+ _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);
+
+ // JMC steppers shouldn't be patching stubs.
+ if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS::TSO: JMC stepper skipping frame.\n"));
+ continue;
+ }
+
+ TraceDestination trace;
+
+ EnableTraceCall(info->m_activeFrame.fp);
+
+ PCODE ip = GetControlPC(&(info->m_activeFrame.registers));
+ if (g_pEEInterface->TraceStub((BYTE*)ip, &trace)
+ && g_pEEInterface->FollowTrace(&trace)
+ && PatchTrace(&trace, info->m_activeFrame.fp,
+ true))
+ break;
+ }
+ else
+#endif // FEATURE_STUBS_AS_IL
+ if (info->m_activeFrame.managed)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO: return frame is managed.\n"));
+
+ if (info->m_activeFrame.frame == NULL)
+ {
+ // Returning normally to managed code.
+ _ASSERTE(info->m_activeFrame.md != NULL);
+
+ // Polymorphic check to skip over non-interesting frames.
+ if (!fForceTraditional && !this->IsInterestingFrame(&info->m_activeFrame))
+ continue;
+
+ dji = info->m_activeFrame.GetJitInfoFromFrame();
+ _ASSERTE(dji != NULL);
+
+ // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value
+ // in that, and it was causing problems creating a stepper while sitting in ndirect stubs after we'd
+ // returned from the unmanaged function that had been called.
+ ULONG reloffset = info->m_activeFrame.relOffset;
+
+ AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md,
+ dji,
+ reloffset,
+ info->m_returnFrame.fp,
+ NULL);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO:normally managed code AddPatch"
+ " in %s::%s, offset 0x%x, m_reason=%d\n",
+ info->m_activeFrame.md->m_pszDebugClassName,
+ info->m_activeFrame.md->m_pszDebugMethodName,
+ reloffset, m_reason));
+
+
+                // Only set m_reason to STEP_RETURN if we are not merely returning from a
+                // finally funclet.  Logically, a funclet and its parent are the same method,
+                // so stepping off the end of a finally funclet should not count as a "return"
+                // to the parent method.
+ if (!fReturningFromFinallyFunclet)
+ {
+ m_reason = STEP_RETURN;
+ }
+ break;
+ }
+ else if (info->m_activeFrame.frame == FRAME_TOP)
+ {
+
+ // Trad-stepper's step-out is actually like a step-next when we go off the top.
+ // JMC-steppers do a true-step out. So for JMC-steppers, don't enable trace-call.
+ if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DS::TSO: JMC stepper skipping exit-frame case.\n"));
+ break;
+ }
+
+ // User break should always be called from managed code, so it should never actually hit this codepath.
+ _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);
+
+
+ // We're walking off the top of the stack. Note that if we call managed code again,
+                // this trace-call will cause our stepper to fire. So we'll actually do a
+ // step-next; not a true-step out.
+ EnableTraceCall(info->m_activeFrame.fp);
+
+ LOG((LF_CORDB, LL_INFO1000, "DS::TSO: Off top of frame!\n"));
+
+ m_reason = STEP_EXIT; //we're on the way out..
+
+ // <REVISIT_TODO>@todo not that it matters since we don't send a
+ // stepComplete message to the right side.</REVISIT_TODO>
+ break;
+ }
+ else if (info->m_activeFrame.frame->GetFrameType() == Frame::TYPE_FUNC_EVAL)
+ {
+ // Note: we treat walking off the top of the stack and
+ // walking off the top of a func eval the same way,
+ // except that we don't enable trace call since we
+ // know exactly where were going.
+
+ LOG((LF_CORDB, LL_INFO1000,
+ "DS::TSO: Off top of func eval!\n"));
+
+ m_reason = STEP_EXIT;
+ break;
+ }
+ else if (info->m_activeFrame.frame->GetFrameType() == Frame::TYPE_SECURITY &&
+ info->m_activeFrame.frame->GetInterception() == Frame::INTERCEPTION_NONE)
+ {
+ // If we're stepping out of something that was protected by (declarative) security,
+                // the security subsystem may leave a frame on the stack to cache its computation.
+ // HOWEVER, this isn't a real frame, and so we don't want to stop here. On the other
+ // hand, if we're in the security goop (sec. executes managed code to do stuff), then
+ // we'll want to use the "returning to stub case", below. GetInterception()==NONE
+ // indicates that the frame is just a cache frame:
+ // Skip it and keep on going
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO: returning to a non-intercepting frame. Keep unwinding\n"));
+ continue;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO: returning to a stub frame.\n"));
+
+ // User break should always be called from managed code, so it should never actually hit this codepath.
+ _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);
+
+ // JMC steppers shouldn't be patching stubs.
+ if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType())
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS::TSO: JMC stepper skipping frame.\n"));
+ continue;
+ }
+
+ // We're returning to some funky frame.
+ // (E.g. a security frame has called a native method.)
+
+ // Patch the frame from entering other methods. This effectively gives the Step-out
+ // a step-next behavior. For eg, this can be useful for step-out going between multicast delegates.
+ // This step-next could actually land us leaf-more on the callstack than we currently are!
+                // If we were a true step-out, we'd skip this and keep crawling
+                // up the callstack.
+ //
+ // !!! For now, we assume that the TraceFrame entry
+ // point is smart enough to tell where it is in the
+ // calling sequence. We'll see how this holds up.
+ TraceDestination trace;
+
+ // We don't want notifications of trace-calls leaf-more than our current frame.
+ // For eg, if our current frame calls out to unmanaged code and then back in,
+ // we'll get a TraceCall notification. But since it's leaf-more than our current frame,
+ // we don't care because we just want to step out of our current frame (and everything
+ // our current frame may call).
+ EnableTraceCall(info->m_activeFrame.fp);
+
+ CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
+
+ if (g_pEEInterface->TraceFrame(GetThread(),
+ info->m_activeFrame.frame, FALSE,
+ &trace, &(info->m_activeFrame.registers))
+ && g_pEEInterface->FollowTrace(&trace)
+ && PatchTrace(&trace, info->m_activeFrame.fp,
+ true))
+ break;
+
+ // !!! Problem: we don't know which return frame to use -
+ // the TraceFrame patch may be in a frame below the return
+ // frame, or in a frame parallel with it
+ // (e.g. prestub popping itself & then calling.)
+ //
+ // For now, I've tweaked the FP comparison in the
+ // patch dispatching code to allow either case.
+ }
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO: return frame is not managed.\n"));
+
+ // Only step out to unmanaged code if we're actually
+            // marked to stop in unmanaged code. Otherwise, just loop
+ // to get us past the unmanaged frames.
+ if (m_rgfMappingStop & STOP_UNMANAGED)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO: return to unmanaged code "
+ "m_reason=STEP_RETURN\n"));
+
+                // Only set m_reason to STEP_RETURN if we are not merely returning from a
+                // finally funclet.  Logically, a funclet and its parent are the same method,
+                // so stepping off the end of a finally funclet should not count as a "return"
+                // to the parent method.
+ if (!fReturningFromFinallyFunclet)
+ {
+ m_reason = STEP_RETURN;
+ }
+
+ // We're stepping out into unmanaged code
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TSO: Setting unmanaged trace patch at 0x%x(%x)\n",
+ GetControlPC(&(info->m_activeFrame.registers)),
+ info->m_returnFrame.fp.GetSPValue()));
+
+
+ AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)GetControlPC(&(info->m_activeFrame.registers)),
+ info->m_returnFrame.fp,
+ FALSE,
+ TRACE_UNMANAGED);
+
+ break;
+
+ }
+ }
+ }
+
+ // <REVISIT_TODO>If we get here, we may be stepping out of the last frame. Our thread
+ // exit logic should catch this case. (@todo)</REVISIT_TODO>
+ LOG((LF_CORDB, LL_INFO10000,"DS::TSO: done\n"));
+}
+
+
+// void DebuggerStepper::StepOut()
+// Sets everything up so that the thread will step out of the current frame:
+// called both by Debugger::HandleIPCEvent (normal step-out) and by
+// DebuggerUserBreakpoint.
+// How: Gets a ControllerStackInfo for the thread at the given frame pointer,
+// resets any cached step ranges (ResetRange), remembers the active frame in
+// m_fp (and, under WIN64EXCEPTIONS, the parent method's frame pointer in
+// m_fpParentMethod when starting in a funclet), sets m_eMode to cStepOut, and
+// then calls TrapStepOut() followed by EnableUnwind(m_fp).
+void DebuggerStepper::StepOut(FramePointer fp, StackTraceTicket ticket)
+{
+ LOG((LF_CORDB, LL_INFO10000, "Attempting to step out, fp:0x%x this:0x%x"
+ "\n", fp.GetSPValue(), this ));
+
+ Thread *thread = GetThread();
+
+
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
+ ControllerStackInfo info;
+
+ // We pass in the ticket b/c this is called both when we're live (via
+ // DebuggerUserBreakpoint) and when we're stopped (via normal StepOut)
+ info.GetStackInfo(ticket, thread, fp, context);
+
+
+ ResetRange();
+
+
+ m_stepIn = FALSE;
+ m_fp = info.m_activeFrame.fp;
+#if defined(WIN64EXCEPTIONS)
+ // We need to remember the parent method frame pointer here so that we will recognize
+ // the range of the stepper as being valid when we return to the parent method.
+ if (info.m_activeFrame.IsNonFilterFuncletFrame())
+ {
+ m_fpParentMethod = info.m_returnFrame.fp;
+ }
+#endif // WIN64EXCEPTIONS
+
+ m_eMode = cStepOut;
+
+ _ASSERTE((fp == LEAF_MOST_FRAME) || (info.m_activeFrame.md != NULL) || (info.m_returnFrame.md != NULL));
+
+ TrapStepOut(&info);
+ EnableUnwind(m_fp);
+}
+
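+// Doubles the m_range allocation on the interop-safe heap whenever the destination cursor
+// (rTo) reaches the end of the currently allocated array.  On size-computation overflow or
+// reallocation failure it frees m_range and makes the enclosing function return false.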
+#define GROW_RANGES_IF_NECESSARY() \
+ if (rTo == rToEnd) \
+ { \
+ ULONG NewSize, OldSize; \
+ if (!ClrSafeInt<ULONG>::multiply(sizeof(COR_DEBUG_STEP_RANGE), (ULONG)(realRangeCount*2), NewSize) || \
+ !ClrSafeInt<ULONG>::multiply(sizeof(COR_DEBUG_STEP_RANGE), (ULONG)realRangeCount, OldSize) || \
+ NewSize < OldSize) \
+ { \
+ DeleteInteropSafe(m_range); \
+ m_range = NULL; \
+ return false; \
+ } \
+ COR_DEBUG_STEP_RANGE *_pTmp = (COR_DEBUG_STEP_RANGE*) \
+ g_pDebugger->GetInteropSafeHeap()->Realloc(m_range, \
+ NewSize, \
+ OldSize); \
+ \
+ if (_pTmp == NULL) \
+ { \
+ DeleteInteropSafe(m_range); \
+ m_range = NULL; \
+ return false; \
+ } \
+ \
+ m_range = _pTmp; \
+ rTo = m_range + realRangeCount; \
+ rToEnd = m_range + (realRangeCount*2); \
+ realRangeCount *= 2; \
+ }
+
+//-----------------------------------------------------------------------------
+// Given a set of IL ranges, convert them to native and cache them.
+// Return true on success, false on error.
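+// An IL range of {0, ~0} is treated as "the whole method"; every other IL range is mapped
+// through MapILRangeToMapEntryRange and may expand into several native ranges (contiguous
+// native ranges are coalesced into a single entry).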
+//-----------------------------------------------------------------------------
+bool DebuggerStepper::SetRangesFromIL(DebuggerJitInfo *dji, COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ WRAPPER(THROWS);
+ GC_NOTRIGGER;
+        PRECONDITION(ThisIsHelperThreadWorker()); // Only the helper thread initializes a stepper.
+ PRECONDITION(m_range == NULL); // shouldn't be set already.
+ PRECONDITION(CheckPointer(ranges));
+ PRECONDITION(CheckPointer(dji));
+ }
+ CONTRACTL_END;
+
+ // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value in that, and
+ // it was causing problems creating a stepper while sitting in ndirect stubs after we'd returned from the unmanaged
+ // function that had been called.
+ MethodDesc *fd = dji->m_fd;
+
+ // The "+1" is for internal use, when we need to
+ // set an intermediate patch in pitched code. Isn't
+ // used unless the method is pitched & a patch is set
+    // inside it.  Thus we still pass rangeCount as the
+ // range count.
+ m_range = new (interopsafe) COR_DEBUG_STEP_RANGE[rangeCount+1];
+
+ if (m_range == NULL)
+ return false;
+
+ TRACE_ALLOC(m_range);
+
+ SIZE_T realRangeCount = rangeCount;
+
+ if (dji != NULL)
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DeSt::St: For code md=0x%x, got DJI 0x%x, from 0x%x to 0x%x\n",
+ fd,
+ dji, dji->m_addrOfCode, (ULONG)dji->m_addrOfCode
+ + (ULONG)dji->m_sizeOfCode));
+
+ //
+ // Map ranges to native offsets for jitted code
+ //
+ COR_DEBUG_STEP_RANGE *r, *rEnd, *rTo, *rToEnd;
+
+ r = ranges;
+ rEnd = r + rangeCount;
+
+ rTo = m_range;
+ rToEnd = rTo + realRangeCount;
+
+ // <NOTE>
+ // rTo may also be incremented in the middle of the loop on WIN64 platforms.
+ // </NOTE>
+ for (/**/; r < rEnd; r++, rTo++)
+ {
+ // If we are already at the end of our allocated array, but there are still
+ // more ranges to copy over, then grow the array.
+ GROW_RANGES_IF_NECESSARY();
+
+ if (r->startOffset == 0 && r->endOffset == (ULONG) ~0)
+ {
+ // {0...-1} means use the entire method as the range
+ // Code dup'd from below case.
+ LOG((LF_CORDB, LL_INFO10000, "DS:Step: Have DJI, special (0,-1) entry\n"));
+ rTo->startOffset = 0;
+ rTo->endOffset = (ULONG32)g_pEEInterface->GetFunctionSize(fd);
+ }
+ else
+ {
+ //
+ // One IL range may consist of multiple
+ // native ranges.
+ //
+
+ DebuggerILToNativeMap *mStart, *mEnd;
+
+ dji->MapILRangeToMapEntryRange(r->startOffset,
+ r->endOffset,
+ &mStart,
+ &mEnd);
+
+ // Either mStart and mEnd are both NULL (we don't have any sequence point),
+ // or they are both non-NULL.
+ _ASSERTE( ((mStart == NULL) && (mEnd == NULL)) ||
+ ((mStart != NULL) && (mEnd != NULL)) );
+
+ if (mStart == NULL)
+ {
+ // <REVISIT_TODO>@todo Won't this result in us stepping across
+ // the entire method?</REVISIT_TODO>
+ rTo->startOffset = 0;
+ rTo->endOffset = 0;
+ }
+ else if (mStart == mEnd)
+ {
+ rTo->startOffset = mStart->nativeStartOffset;
+ rTo->endOffset = mStart->nativeEndOffset;
+ }
+ else
+ {
+ // Account for more than one continuous range here.
+
+ // Move the pointer back to work with the loop increment below.
+ // Don't dereference this pointer now!
+ rTo--;
+
+ for (DebuggerILToNativeMap* pMap = mStart;
+ pMap <= mEnd;
+ pMap = pMap + 1)
+ {
+ if ((pMap == mStart) ||
+ (pMap->nativeStartOffset != (pMap-1)->nativeEndOffset))
+ {
+ rTo++;
+ GROW_RANGES_IF_NECESSARY();
+
+ rTo->startOffset = pMap->nativeStartOffset;
+ rTo->endOffset = pMap->nativeEndOffset;
+ }
+ else
+ {
+ // If we have continuous ranges, then lump them together.
+ _ASSERTE(rTo->endOffset == pMap->nativeStartOffset);
+ rTo->endOffset = pMap->nativeEndOffset;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "DS:Step: nat off:0x%x to 0x%x\n", rTo->startOffset, rTo->endOffset));
+ }
+ }
+ }
+
+ rangeCount = (int)((BYTE*)rTo - (BYTE*)m_range) / sizeof(COR_DEBUG_STEP_RANGE);
+ }
+ else
+ {
+ // Even if we don't have debug info, we'll be able to
+ // step through the method
+ SIZE_T functionSize = g_pEEInterface->GetFunctionSize(fd);
+
+ COR_DEBUG_STEP_RANGE *r = ranges;
+ COR_DEBUG_STEP_RANGE *rEnd = r + rangeCount;
+
+ COR_DEBUG_STEP_RANGE *rTo = m_range;
+
+ for(/**/; r < rEnd; r++, rTo++)
+ {
+ if (r->startOffset == 0 && r->endOffset == (ULONG) ~0)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS:Step:No DJI, (0,-1) special entry\n"));
+ // Code dup'd from above case.
+ // {0...-1} means use the entire method as the range
+ rTo->startOffset = 0;
+ rTo->endOffset = (ULONG32)functionSize;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS:Step:No DJI, regular entry\n"));
+                // We can't just leave this IL entry - we have to
+ // get rid of it.
+ // This will just be ignored
+ rTo->startOffset = rTo->endOffset = (ULONG32)functionSize;
+ }
+ }
+ }
+
+
+ m_rangeCount = rangeCount;
+ m_realRangeCount = rangeCount;
+
+ return true;
+}
+
+
+// bool DebuggerStepper::Step() Tells the stepper to step over
+// the provided ranges.
+// FramePointer fp: frame pointer.
+// bool in: true if we want to step into a function within the range,
+//      false if we want to step over functions within the range.
+// COR_DEBUG_STEP_RANGE *ranges: Assumed to be non-NULL; it will
+//      always hold at least one element.
+// SIZE_T rangeCount: One less than the true number of elements in
+//      the ranges argument.
+// bool rangeIL: true if the ranges are provided in IL (they'll be
+//      converted to native before the DebuggerStepper uses them),
+//      false if they are already native.
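+//
+// Illustrative sketch of a call (hypothetical offsets, not taken from this file):
+//      COR_DEBUG_STEP_RANGE range = { 0x10, 0x24 };    // IL start/end offsets
+//      pStepper->Step(LEAF_MOST_FRAME, false /*in*/, &range, 1 /*rangeCount*/, true /*rangeIL*/);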
+bool DebuggerStepper::Step(FramePointer fp, bool in,
+ COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount,
+ bool rangeIL)
+{
+ LOG((LF_CORDB, LL_INFO1000, "DeSt:Step this:0x%x ", this));
+ if (rangeCount>0)
+ LOG((LF_CORDB,LL_INFO10000," start,end[0]:(0x%x,0x%x)\n",
+ ranges[0].startOffset, ranges[0].endOffset));
+ else
+ LOG((LF_CORDB,LL_INFO10000," single step\n"));
+
+ Thread *thread = GetThread();
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
+
+ // ControllerStackInfo doesn't report IL stubs, so if we are in an IL stub, we need
+ // to handle the single-step specially. There are probably other problems when we stop
+ // in an IL stub. We need to revisit this later.
+ bool fIsILStub = false;
+ if ((context != NULL) &&
+ g_pEEInterface->IsManagedNativeCode(reinterpret_cast<const BYTE *>(GetIP(context))))
+ {
+ MethodDesc * pMD = g_pEEInterface->GetNativeCodeMethodDesc(GetIP(context));
+ if (pMD != NULL)
+ {
+ fIsILStub = pMD->IsILStub();
+ }
+ }
+ LOG((LF_CORDB, LL_INFO10000, "DS::S - fIsILStub = %d\n", fIsILStub));
+
+ ControllerStackInfo info;
+
+
+ StackTraceTicket ticket(thread);
+ info.GetStackInfo(ticket, thread, fp, context);
+
+ _ASSERTE((fp == LEAF_MOST_FRAME) || (info.m_activeFrame.md != NULL) ||
+ (info.m_returnFrame.md != NULL));
+
+ m_stepIn = in;
+
+ DebuggerJitInfo *dji = info.m_activeFrame.GetJitInfoFromFrame();
+
+ if (dji == NULL)
+ {
+ // !!! ERROR range step in frame with no code
+ ranges = NULL;
+ rangeCount = 0;
+ }
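+    // Note: with no DJI we have no IL-to-native map, so we drop the ranges and
+    // the request effectively degenerates to a single-step (the rangeCount == 0
+    // path below).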
+
+
+ if (m_range != NULL)
+ {
+ TRACE_FREE(m_range);
+ DeleteInteropSafe(m_range);
+ m_range = NULL;
+ m_rangeCount = 0;
+ m_realRangeCount = 0;
+ }
+
+ if (rangeCount > 0)
+ {
+ if (rangeIL)
+ {
+ // IL ranges supplied, we need to convert them to native ranges.
+ bool fOk = SetRangesFromIL(dji, ranges, rangeCount);
+ if (!fOk)
+ {
+ return false;
+ }
+ }
+ else
+ {
+ // Native ranges, already supplied. Just copy them over.
+ m_range = new (interopsafe) COR_DEBUG_STEP_RANGE[rangeCount];
+
+ if (m_range == NULL)
+ {
+ return false;
+ }
+
+ memcpy(m_range, ranges, sizeof(COR_DEBUG_STEP_RANGE) * rangeCount);
+ m_realRangeCount = m_rangeCount = rangeCount;
+ }
+ _ASSERTE(m_range != NULL);
+ _ASSERTE(m_rangeCount > 0);
+ _ASSERTE(m_realRangeCount > 0);
+ }
+ else
+ {
+ // !!! ERROR cannot map IL ranges
+ ranges = NULL;
+ rangeCount = 0;
+ }
+
+ if (fIsILStub)
+ {
+ // Don't use the ControllerStackInfo if we are in an IL stub.
+ m_fp = fp;
+ }
+ else
+ {
+ m_fp = info.m_activeFrame.fp;
+#if defined(WIN64EXCEPTIONS)
+ // We need to remember the parent method frame pointer here so that we will recognize
+ // the range of the stepper as being valid when we return to the parent method.
+ if (info.m_activeFrame.IsNonFilterFuncletFrame())
+ {
+ m_fpParentMethod = info.m_returnFrame.fp;
+ }
+#endif // WIN64EXCEPTIONS
+ }
+ m_eMode = m_stepIn ? cStepIn : cStepOver;
+
+    LOG((LF_CORDB,LL_INFO10000,"DS 0x%x Step: STEP_NORMAL\n",this));
+ m_reason = STEP_NORMAL; //assume it'll be a normal step & set it to
+ //something else if we walk over it
+ if (fIsILStub)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS:Step: stepping in an IL stub\n"));
+
+ // Enable the right triggers if the user wants to step in.
+ if (in)
+ {
+ if (this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER)
+ {
+ EnableTraceCall(info.m_activeFrame.fp);
+ }
+ else if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
+ {
+ EnableMethodEnter();
+ }
+ }
+
+ // Also perform a step-out in case this IL stub is returning to managed code.
+ // However, we must fix up the ControllerStackInfo first, since it doesn't
+ // report IL stubs. The active frame reported by the ControllerStackInfo is
+ // actually the return frame in this case.
+ info.SetReturnFrameWithActiveFrame();
+ TrapStepOut(&info);
+ }
+ else if (!TrapStep(&info, in))
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS:Step: Did TS\n"));
+ m_stepIn = true;
+ TrapStepNext(&info);
+ }
+
+ LOG((LF_CORDB,LL_INFO10000,"DS:Step: Did TS,TSO\n"));
+
+ EnableUnwind(m_fp);
+
+ return true;
+}
+
+// TP_RESULT DebuggerStepper::TriggerPatch()
+// What: Triggers patch if we're not in a stub, and we're
+// outside of the stepping range. Otherwise sets another patch so as to
+// step out of the stub, or in the next instruction within the range.
+// How: If module==NULL & managed ==> we're in a stub:
+// TrapStepOut() and return TPR_IGNORE. Module==NULL & !managed ==> return
+// TPR_TRIGGER. If m_range != NULL & execution is currently in the range,
+// attempt a TrapStep (TrapStepOut otherwise) & return TPR_IGNORE. Otherwise,
+// return TPR_TRIGGER.
+TP_RESULT DebuggerStepper::TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DeSt::TP\n"));
+
+ // If we're frozen, we may hit a patch but we just ignore it
+ if (IsFrozen())
+ {
+ LOG((LF_CORDB, LL_INFO1000000, "DS::TP, ignoring patch at %p during frozen state\n", patch->address));
+ return TPR_IGNORE;
+ }
+
+ Module *module = patch->key.module;
+ BOOL managed = patch->IsManagedPatch();
+ mdMethodDef md = patch->key.md;
+ SIZE_T offset = patch->offset;
+
+ _ASSERTE((this->GetThread() == thread) || !"Stepper should only get patches on its thread");
+
+ // Note we can only run a stack trace if:
+ // - the context is in managed code (eg, not a stub)
+ // - OR we have a frame in place to prime the stackwalk.
+ ControllerStackInfo info;
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
+
+ _ASSERTE(!ISREDIRECTEDTHREAD(thread));
+
+ // Context should always be from patch.
+ _ASSERTE(context != NULL);
+
+ bool fSafeToDoStackTrace = true;
+
+    // If we're in a stub (module == NULL and still in managed code), then our context is off in lala-land.
+ // Then, it's only safe to do a stackwalk if the top frame is protecting us. That's only true for a
+ // frame_push. If we're here on a manager_push, then we don't have any such protection, so don't do the
+ // stackwalk.
+
+ fSafeToDoStackTrace = patch->IsSafeForStackTrace();
+
+
+ if (fSafeToDoStackTrace)
+ {
+ StackTraceTicket ticket(patch);
+ info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, context);
+
+ LOG((LF_CORDB, LL_INFO10000, "DS::TP: this:0x%p in %s::%s (fp:0x%p, "
+ "off:0x%p md:0x%p), \n\texception source:%s::%s (fp:0x%p)\n",
+ this,
+ info.m_activeFrame.md!=NULL?info.m_activeFrame.md->m_pszDebugClassName:"Unknown",
+ info.m_activeFrame.md!=NULL?info.m_activeFrame.md->m_pszDebugMethodName:"Unknown",
+ info.m_activeFrame.fp.GetSPValue(), patch->offset, patch->key.md,
+ m_fdException!=NULL?m_fdException->m_pszDebugClassName:"None",
+ m_fdException!=NULL?m_fdException->m_pszDebugMethodName:"None",
+ m_fpException.GetSPValue()));
+ }
+
+ DisableAll();
+
+ if (DetectHandleLCGMethods(dac_cast<PCODE>(patch->address), NULL, &info))
+ {
+ return TPR_IGNORE;
+ }
+
+ if (module == NULL)
+ {
+ // JMC steppers should not be patching here...
+ _ASSERTE(DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType());
+
+ if (managed)
+ {
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "Frame (stub) patch hit at offset 0x%x\n", offset));
+
+ // This is a stub patch. If it was a TRACE_FRAME_PUSH that
+ // got us here, then the stub's frame is pushed now, so we
+ // tell the frame to apply the real patch. If we got here
+ // via a TRACE_MGR_PUSH, however, then there is no frame
+ // and we tell the stub manager that generated the
+ // TRACE_MGR_PUSH to apply the real patch.
+ TraceDestination trace;
+ bool traceOk;
+ FramePointer frameFP;
+ PTR_BYTE traceManagerRetAddr = NULL;
+
+ if (patch->trace.GetTraceType() == TRACE_MGR_PUSH)
+ {
+ _ASSERTE(context != NULL);
+ CONTRACT_VIOLATION(GCViolation);
+ traceOk = g_pEEInterface->TraceManager(
+ thread,
+ patch->trace.GetStubManager(),
+ &trace,
+ context,
+ &traceManagerRetAddr);
+
+                // We don't have an active frame here, so patch with a
+                // FP of NULL so anything will match.
+ //
+ // <REVISIT_TODO>@todo: should we take Esp out of the context?</REVISIT_TODO>
+ frameFP = LEAF_MOST_FRAME;
+ }
+ else
+ {
+ _ASSERTE(fSafeToDoStackTrace);
+ CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
+ traceOk = g_pEEInterface->TraceFrame(thread,
+ thread->GetFrame(),
+ TRUE,
+ &trace,
+ &(info.m_activeFrame.registers));
+
+ frameFP = info.m_activeFrame.fp;
+ }
+
+ // Enable the JMC backstop for traditional steppers to catch us in case
+ // we didn't predict the call target properly.
+ EnableJMCBackStop(NULL);
+
+
+ if (!traceOk
+ || !g_pEEInterface->FollowTrace(&trace)
+ || !PatchTrace(&trace, frameFP,
+ (m_rgfMappingStop&STOP_UNMANAGED)?
+ (true):(false)))
+ {
+ //
+ // We can't set a patch in the frame -- we need
+ // to trap returning from this frame instead.
+ //
+ // Note: if we're in the TRACE_MGR_PUSH case from
+ // above, then we must place a patch where the
+ // TraceManager function told us to, since we can't
+ // actually unwind from here.
+ //
+ if (patch->trace.GetTraceType() != TRACE_MGR_PUSH)
+ {
+ _ASSERTE(fSafeToDoStackTrace);
+ LOG((LF_CORDB,LL_INFO10000,"TSO for non TRACE_MGR_PUSH case\n"));
+ TrapStepOut(&info);
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "TSO for TRACE_MGR_PUSH case."));
+
+ // We'd better have a valid return address.
+ _ASSERTE(traceManagerRetAddr != NULL);
+
+ if (g_pEEInterface->IsManagedNativeCode(traceManagerRetAddr))
+ {
+ // Grab the jit info for the method.
+ DebuggerJitInfo *dji;
+ dji = g_pDebugger->GetJitInfoFromAddr((TADDR) traceManagerRetAddr);
+
+ MethodDesc * mdNative = (dji == NULL) ?
+ g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(traceManagerRetAddr)) : dji->m_fd;
+ _ASSERTE(mdNative != NULL);
+
+ // Find the method that the return is to.
+ _ASSERTE(g_pEEInterface->GetFunctionAddress(mdNative) != NULL);
+ SIZE_T offsetRet = dac_cast<TADDR>(traceManagerRetAddr -
+ g_pEEInterface->GetFunctionAddress(mdNative));
+
+ // Place the patch.
+ AddBindAndActivateNativeManagedPatch(mdNative,
+ dji,
+ offsetRet,
+ LEAF_MOST_FRAME,
+ NULL);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DS::TP: normally managed code AddPatch"
+ " in %s::%s, offset 0x%x\n",
+ mdNative->m_pszDebugClassName,
+ mdNative->m_pszDebugMethodName,
+ offsetRet));
+ }
+ else
+ {
+ // We're hitting this code path with MC++ assemblies
+ // that have an unmanaged entry point so the stub returns to CallDescrWorker.
+ _ASSERTE(g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(patch->address))->IsILStub());
+ }
+
+ }
+
+ m_reason = STEP_NORMAL; //we tried to do a STEP_CALL, but since it didn't
+ //work, we're doing what amounts to a normal step.
+            LOG((LF_CORDB,LL_INFO10000,"DS 0x%x m_reason = STEP_NORMAL"
+                " (attempted call thru stub manager, SM didn't know where"
+                " we're going, so did a step out to original call)\n",this));
+ }
+ else
+ {
+ m_reason = STEP_CALL;
+ }
+
+ EnableTraceCall(LEAF_MOST_FRAME);
+ EnableUnwind(m_fp);
+
+ return TPR_IGNORE;
+ }
+ else
+ {
+ // @todo - when would we hit this codepath?
+ // If we're not in managed, then we should have pushed a frame onto the Thread's frame chain,
+ // and thus we should still safely be able to do a stackwalk here.
+ _ASSERTE(fSafeToDoStackTrace);
+ if (DetectHandleInterceptors(&info) )
+ {
+ return TPR_IGNORE; //don't actually want to stop
+ }
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "Unmanaged step patch hit at 0x%x\n", offset));
+
+ StackTraceTicket ticket(patch);
+ PrepareForSendEvent(ticket);
+ return TPR_TRIGGER;
+ }
+ } // end (module == NULL)
+
+    // If we're inside an interceptor but don't want to be, then we'll set a
+ // patch outside the current function.
+ _ASSERTE(fSafeToDoStackTrace);
+ if (DetectHandleInterceptors(&info) )
+ {
+ return TPR_IGNORE; //don't actually want to stop
+ }
+
+ LOG((LF_CORDB,LL_INFO10000, "DS: m_fp:0x%p, activeFP:0x%p fpExc:0x%p\n",
+ m_fp.GetSPValue(), info.m_activeFrame.fp.GetSPValue(), m_fpException.GetSPValue()));
+
+ if (IsInRange(offset, m_range, m_rangeCount, &info) ||
+ ShouldContinueStep( &info, offset))
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "Intermediate step patch hit at 0x%x\n", offset));
+
+ if (!TrapStep(&info, m_stepIn))
+ TrapStepNext(&info);
+
+ EnableUnwind(m_fp);
+ return TPR_IGNORE;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Step patch hit at 0x%x\n", offset));
+
+ // For a JMC stepper, we have an additional constraint:
+ // skip non-user code. So if we're still in non-user code, then
+ // we've got to keep going
+ DebuggerMethodInfo * dmi = g_pDebugger->GetOrCreateMethodInfo(module, md);
+
+ if ((dmi != NULL) && DetectHandleNonUserCode(&info, dmi))
+ {
+ return TPR_IGNORE;
+ }
+
+ StackTraceTicket ticket(patch);
+ PrepareForSendEvent(ticket);
+ return TPR_TRIGGER;
+ }
+}
+
+// Return true if this should be skipped.
+// For a non-jmc stepper, we don't care about non-user code, so we
+// don't skip it and so we always return false.
+bool DebuggerStepper::DetectHandleNonUserCode(ControllerStackInfo *info, DebuggerMethodInfo * pInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return false;
+}
+
+// For regular steppers, trace-call is just a trace-call.
+void DebuggerStepper::EnablePolyTraceCall()
+{
+ this->EnableTraceCall(LEAF_MOST_FRAME);
+}
+
+// Traditional steppers enable MethodEnter as a back-stop for step-in.
+// We hope that the stub-managers will predict the step-in for us,
+// but in case they don't the Method-Enter should catch us.
+// MethodEnter is not fully correct for traditional steppers for a few reasons:
+// - doesn't handle step-in to native
+// - stops us *after* the prolog (a traditional stepper can stop us before the prolog).
+// - only works for methods that have the JMC probe. That can exclude all optimized code.
+void DebuggerStepper::TriggerMethodEnter(Thread * thread,
+ DebuggerJitInfo *dji,
+ const BYTE * ip,
+ FramePointer fp)
+{
+ _ASSERTE(dji != NULL);
+ _ASSERTE(thread != NULL);
+ _ASSERTE(ip != NULL);
+
+
+
+ _ASSERTE(this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER);
+
+ _ASSERTE(!IsFrozen());
+
+ MethodDesc * pDesc = dji->m_fd;
+ LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, desc=%p, addr=%p\n",
+ pDesc, ip));
+
+ // JMC steppers won't stop in Lightweight delegates. Just return & keep executing.
+ if (pDesc->IsNoMetadata())
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, skipping b/c it's lw-codegen\n"));
+ return;
+ }
+
+ // This is really just a heuristic. We don't want to trigger a JMC probe when we are
+ // executing in an IL stub, or in one of the marshaling methods called by the IL stub.
+ // The problem is that the IL stub can call into arbitrary code, including custom marshalers.
+ // In that case the user has to put a breakpoint to stop in the code.
+ if (g_pEEInterface->DetectHandleILStubs(thread))
+ {
+ return;
+ }
+
+#ifdef _DEBUG
+ // To help trace down if a problem is related to a stubmanager,
+ // we add a knob that lets us skip the MethodEnter checks. This lets tests directly
+ // go against the Stub-managers w/o the MethodEnter check backstops.
+ int fSkip = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgSkipMEOnStep);
+ if (fSkip)
+ {
+ return;
+ }
+
+ // See EnableJMCBackStop() for details here. This check just makes sure that we don't fire
+ // the assert if we end up in the method we started in (which could happen if we trace call
+ // instructions before the JMC probe).
+ // m_StepInStartMethod may be null (if this step-in didn't start from managed code).
+ if ((m_StepInStartMethod != pDesc) &&
+ (!m_StepInStartMethod->IsLCGMethod()))
+ {
+ // Since normal step-in should stop us at the prolog, and TME is after the prolog,
+ // if a stub-manager did successfully find the address, we should get a TriggerPatch first
+ // at native offset 0 (before the prolog) and before we get the TME. That means if
+ // we do get the TME, then there was no stub-manager to find us.
+
+ SString sLog;
+ StubManager::DbgGetLog(&sLog);
+
+ // Assert b/c the Stub-manager should have caught us first.
+ // We don't want people relying on TriggerMethodEnter as the real implementation for Traditional Step-in
+ // (see above for reasons why). However, using TME will provide a bandage for the final retail product
+ // in cases where we are missing a stub-manager.
+ CONSISTENCY_CHECK_MSGF(false, (
+            "\nThe Stubmanagers failed to identify and trace a stub on step-in. The stub-managers for this code-path need to be fixed.\n"
+ "See http://team/sites/clrdev/Devdocs/StubManagers.rtf for more information on StubManagers.\n"
+ "Stepper this=0x%p, startMethod='%s::%s'\n"
+ "---------------------------------\n"
+ "Stub manager log:\n%S"
+ "\n"
+ "The thread is now in managed method '%s::%s'.\n"
+ "---------------------------------\n",
+ this,
+ ((m_StepInStartMethod == NULL) ? "unknown" : m_StepInStartMethod->m_pszDebugClassName),
+ ((m_StepInStartMethod == NULL) ? "unknown" : m_StepInStartMethod->m_pszDebugMethodName),
+ sLog.GetUnicode(),
+ pDesc->m_pszDebugClassName, pDesc->m_pszDebugMethodName
+ ));
+ }
+#endif
+
+
+
+    // Place a patch to stop us.
+    // Don't bind to a particular AppDomain so that we can do a Cross-Appdomain step.
+ AddBindAndActivateNativeManagedPatch(pDesc,
+ dji,
+ CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ip),
+ fp,
+ NULL // AppDomain
+ );
+
+ LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, after setting patch to stop\n"));
+
+ // Once we resume, we'll go hit that patch (duh, we patched our return address)
+ // Furthermore, we know the step will complete with reason = call, so set that now.
+ m_reason = STEP_CALL;
+}
+
+
+// We may have single-stepped over a return statement to land us up a frame.
+// Or we may have single-stepped through a method.
+// We never single-step into calls (we place a patch at the call destination).
+bool DebuggerStepper::TriggerSingleStep(Thread *thread, const BYTE *ip)
+{
+ LOG((LF_CORDB,LL_INFO10000,"DS:TSS this:0x%x, @ ip:0x%x\n", this, ip));
+
+ _ASSERTE(!IsFrozen());
+
+ // User break should only do a step-out and never actually need a singlestep flag.
+ _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT);
+
+ //
+ // there's one weird case here - if the last instruction generated
+ // a hardware exception, we may be in lala land. If so, rely on the unwind
+ // handler to figure out what happened.
+ //
+ // <REVISIT_TODO>@todo this could be wrong when we have the incremental collector going</REVISIT_TODO>
+ //
+
+ if (!g_pEEInterface->IsManagedNativeCode(ip))
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DS::TSS: not in managed code, Returning false (case 0)!\n"));
+ DisableSingleStep();
+ return false;
+ }
+
+    // If we EnC the method, we'll blast the function address,
+    // and so have to get it from the DJI that we'll have. If
+    // we haven't gotten debugger info about a regular function, then
+    // we'll have to get the info from the EE, which will be valid
+    // since we're standing in the function at this point, and
+    // EnC couldn't have happened yet.
+ MethodDesc *fd = g_pEEInterface->GetNativeCodeMethodDesc((PCODE)ip);
+
+ SIZE_T offset;
+ DebuggerJitInfo *dji = g_pDebugger->GetJitInfoFromAddr((TADDR) ip);
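+    // Convert the raw IP into a native offset relative to the start of the
+    // method's code (CodeRegionInfo accounts for split hot/cold code regions).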
+ offset = CodeRegionInfo::GetCodeRegionInfo(dji, fd).AddressToOffset(ip);
+
+ ControllerStackInfo info;
+
+ // Safe to stackwalk b/c we've already checked that our IP is in crawlable code.
+ StackTraceTicket ticket(ip);
+ info.GetStackInfo(ticket, GetThread(), LEAF_MOST_FRAME, NULL);
+
+ // This is a special case where we return from a managed method back to an IL stub. This can
+ // only happen if there's no more managed method frames closer to the root and we want to perform
+ // a step out, or if we step-next off the end of a method called by an IL stub. In either case,
+ // we'll get a single step in an IL stub, which we want to ignore. We also want to enable trace
+ // call here, just in case this IL stub is about to call the managed target (in the reverse interop case).
+ if (fd->IsILStub())
+ {
+        LOG((LF_CORDB,LL_INFO10000, "DS::TSS: in an IL stub, Returning false (case 0)!\n"));
+ if (this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER)
+ {
+ EnableTraceCall(info.m_activeFrame.fp);
+ }
+ else if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
+ {
+ EnableMethodEnter();
+ }
+ DisableSingleStep();
+ return false;
+ }
+
+ DisableAll();
+
+ LOG((LF_CORDB,LL_INFO10000, "DS::TSS m_fp:0x%x, activeFP:0x%x fpExc:0x%x\n",
+ m_fp.GetSPValue(), info.m_activeFrame.fp.GetSPValue(), m_fpException.GetSPValue()));
+
+ if (DetectHandleLCGMethods((PCODE)ip, fd, &info))
+ {
+ return false;
+ }
+
+ if (IsInRange(offset, m_range, m_rangeCount, &info) ||
+ ShouldContinueStep( &info, offset))
+ {
+ if (!TrapStep(&info, m_stepIn))
+ TrapStepNext(&info);
+
+ EnableUnwind(m_fp);
+
+ LOG((LF_CORDB,LL_INFO10000, "DS::TSS: Returning false Case 1!\n"));
+ return false;
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO10000, "DS::TSS: Returning true Case 2 for reason STEP_%02x!\n", m_reason));
+
+ // @todo - when would a single-step (not a patch) land us in user-code?
+ // For a JMC stepper, we have an additional constraint:
+ // skip non-user code. So if we're still in non-user code, then
+ // we've got to keep going
+ DebuggerMethodInfo * dmi = g_pDebugger->GetOrCreateMethodInfo(fd->GetModule(), fd->GetMemberDef());
+
+ if ((dmi != NULL) && DetectHandleNonUserCode(&info, dmi))
+ return false;
+
+ PrepareForSendEvent(ticket);
+ return true;
+ }
+}
+
+void DebuggerStepper::TriggerTraceCall(Thread *thread, const BYTE *ip)
+{
+ LOG((LF_CORDB,LL_INFO10000,"DS:TTC this:0x%x, @ ip:0x%x\n",this,ip));
+ TraceDestination trace;
+
+ if (IsFrozen())
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS:TTC exit b/c of Frozen\n"));
+ return;
+ }
+
+ // This is really just a heuristic. We don't want to trigger a JMC probe when we are
+ // executing in an IL stub, or in one of the marshaling methods called by the IL stub.
+ // The problem is that the IL stub can call into arbitrary code, including custom marshalers.
+ // In that case the user has to put a breakpoint to stop in the code.
+ if (g_pEEInterface->DetectHandleILStubs(thread))
+ {
+ return;
+ }
+
+ if (g_pEEInterface->TraceStub(ip, &trace)
+ && g_pEEInterface->FollowTrace(&trace)
+ && PatchTrace(&trace, LEAF_MOST_FRAME,
+ (m_rgfMappingStop&STOP_UNMANAGED)?(true):(false)))
+ {
+ // !!! We really want to know ahead of time if PatchTrace will succeed.
+ DisableAll();
+ PatchTrace(&trace, LEAF_MOST_FRAME, (m_rgfMappingStop&STOP_UNMANAGED)?
+ (true):(false));
+
+ // If we're triggering a trace call, and we're following a trace into either managed code or unjitted managed
+ // code, then we need to update our stepper's reason to STEP_CALL to reflect the fact that we're going to land
+ // into a new function because of a call.
+ if ((trace.GetTraceType() == TRACE_UNJITTED_METHOD) || (trace.GetTraceType() == TRACE_MANAGED))
+ {
+ m_reason = STEP_CALL;
+ }
+
+ EnableUnwind(m_fp);
+
+ LOG((LF_CORDB, LL_INFO10000, "DS::TTC potentially a step call!\n"));
+ }
+}
+
+void DebuggerStepper::TriggerUnwind(Thread *thread,
+ MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset,
+ FramePointer fp,
+ CorDebugStepReason unwindReason)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS; // from GetJitInfo
+ GC_NOTRIGGER; // don't send IPC events
+ MODE_COOPERATIVE; // TriggerUnwind always is coop
+
+ PRECONDITION(!IsDbgHelperSpecialThread());
+ PRECONDITION(fd->IsDynamicMethod() || (pDJI != NULL));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO10000,"DS::TU this:0x%p, in %s::%s, offset 0x%p "
+ "frame:0x%p unwindReason:0x%x\n", this, fd->m_pszDebugClassName,
+ fd->m_pszDebugMethodName, offset, fp.GetSPValue(), unwindReason));
+
+ _ASSERTE(unwindReason == STEP_EXCEPTION_FILTER || unwindReason == STEP_EXCEPTION_HANDLER);
+
+ if (IsFrozen())
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DS:TTC exit b/c of Frozen\n"));
+ return;
+ }
+
+ if (IsCloserToRoot(fp, GetUnwind()))
+ {
+        // Handler is in a parent frame. For all steps (in, out, over)
+        // we want to stop in the handler.
+        // This will be like a Step Out, so we don't need any range.
+ ResetRange();
+ }
+ else
+ {
+ // Handler/Filter is in the same frame as the stepper
+ // For a step-in/over, we want to patch the handler/filter.
+ // But for a step-out, we want to just continue executing (and don't change
+ // the step-reason either).
+ if (m_eMode == cStepOut)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS::TU Step-out, returning for same-frame case.\n"));
+ return;
+ }
+
+ }
+
+ // Remember the origin of the exception, so that if the step looks like
+ // it's going to complete in a different frame, but the code comes from the
+ // same frame as the one we're in, we won't stop twice in the "same" range
+ m_fpException = fp;
+ m_fdException = fd;
+
+ //
+ // An exception is exiting the step region. Set a patch on
+ // the filter/handler.
+ //
+
+ DisableAll();
+
+ BOOL fOk;
+ fOk = AddBindAndActivateNativeManagedPatch(fd, pDJI, offset, LEAF_MOST_FRAME, NULL);
+
+ // Since we're unwinding to an already executed method, the method should already
+ // be jitted and placing the patch should work.
+ CONSISTENCY_CHECK_MSGF(fOk, ("Failed to place patch at TriggerUnwind.\npThis=0x%p md=0x%p, native offset=0x%x\n", this, fd, offset));
+
+ LOG((LF_CORDB,LL_INFO100000,"Step reason:%s\n", unwindReason==STEP_EXCEPTION_FILTER
+ ? "STEP_EXCEPTION_FILTER":"STEP_EXCEPTION_HANDLER"));
+ m_reason = unwindReason;
+}
+
+
+// Prepare for sending an event.
+// This is called 1:1 w/ SendEvent, but this guy can be called in a GC_TRIGGERABLE context
+// whereas SendEvent is pretty strict.
+// Caller ensures that it's safe to run a stack trace.
+void DebuggerStepper::PrepareForSendEvent(StackTraceTicket ticket)
+{
+#ifdef _DEBUG
+ _ASSERTE(!m_fReadyToSend);
+ m_fReadyToSend = true;
+#endif
+
+ LOG((LF_CORDB, LL_INFO10000, "DS::SE m_fpStepInto:0x%x\n", m_fpStepInto.GetSPValue()));
+
+ if (m_fpStepInto != LEAF_MOST_FRAME)
+ {
+ ControllerStackInfo csi;
+ csi.GetStackInfo(ticket, GetThread(), LEAF_MOST_FRAME, NULL);
+
+ if (csi.m_targetFrameFound &&
+#if !defined(WIN64EXCEPTIONS)
+ IsCloserToRoot(m_fpStepInto, csi.m_activeFrame.fp)
+#else
+ IsCloserToRoot(m_fpStepInto, (csi.m_activeFrame.IsNonFilterFuncletFrame() ? csi.m_returnFrame.fp : csi.m_activeFrame.fp))
+#endif // WIN64EXCEPTIONS
+ )
+
+ {
+ m_reason = STEP_CALL;
+ LOG((LF_CORDB, LL_INFO10000, "DS::SE this:0x%x STEP_CALL!\n", this));
+ }
+#ifdef _DEBUG
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DS::SE this:0x%x not a step call!\n", this));
+ }
+#endif
+ }
+
+#ifdef _DEBUG
+ // Steppers should only stop in interesting code.
+ if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER)
+ {
+ // If we're at either a patch or SS, we'll have a context.
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(GetThread());
+        if (context != NULL)
+ {
+ void * pIP = CORDbgGetIP(reinterpret_cast<DT_CONTEXT *>(context));
+
+ DebuggerJitInfo * dji = g_pDebugger->GetJitInfoFromAddr((TADDR) pIP);
+ DebuggerMethodInfo * dmi = NULL;
+ if (dji != NULL)
+ {
+ dmi = dji->m_methodInfo;
+
+ CONSISTENCY_CHECK_MSGF(dmi->IsJMCFunction(), ("JMC stepper %p stopping in non-jmc method, MD=%p, '%s::%s'",
+ this, dji->m_fd, dji->m_fd->m_pszDebugClassName, dji->m_fd->m_pszDebugMethodName));
+
+ }
+
+
+ }
+ }
+
+#endif
+}
+
+bool DebuggerStepper::SendEvent(Thread *thread, bool fIpChanged)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ SENDEVENT_CONTRACT_ITEMS;
+ }
+ CONTRACTL_END;
+
+    // We practically should never have a step interrupted by SetIp.
+ // We'll still go ahead and send the Step-complete event because we've already
+ // deactivated our triggers by now and we haven't placed any new patches to catch us.
+ // We assert here because we don't believe we'll ever be able to hit this scenario.
+ // This is technically an issue, but we consider it benign enough to leave in.
+    _ASSERTE(!fIpChanged || !"Stepper interrupted by SetIp");
+
+ LOG((LF_CORDB, LL_INFO10000, "DS::SE m_fpStepInto:0x%x\n", m_fpStepInto.GetSPValue()));
+
+ _ASSERTE(m_fReadyToSend);
+ _ASSERTE(GetThread() == thread);
+
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
+ _ASSERTE(!ISREDIRECTEDTHREAD(thread));
+
+ // We need to send the stepper and delete the controller because our stepper
+ // no longer has any patches or other triggers that will let it send the step-complete event.
+ g_pDebugger->SendStep(thread, context, this, m_reason);
+
+ this->Delete();
+
+#ifdef _DEBUG
+ // Now that we've sent the event, we can stop recording information.
+ StubManager::DbgFinishLog();
+#endif
+
+ return true;
+}
+
+void DebuggerStepper::ResetRange()
+{
+ if (m_range)
+ {
+ TRACE_FREE(m_range);
+ DeleteInteropSafe(m_range);
+
+ m_range = NULL;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Return true if this stepper is alive, but frozen. (we freeze when the stepper
+// enters a nested func-eval).
+//-----------------------------------------------------------------------------
+bool DebuggerStepper::IsFrozen()
+{
+ return (m_cFuncEvalNesting > 0);
+}
+
+//-----------------------------------------------------------------------------
+// Returns true if this stepper is 'dead' - which happens if a non-frozen stepper
+// gets a func-eval exit.
+//-----------------------------------------------------------------------------
+bool DebuggerStepper::IsDead()
+{
+ return (m_cFuncEvalNesting < 0);
+}
+
+// * ------------------------------------------------------------------------
+// * DebuggerJMCStepper routines
+// * ------------------------------------------------------------------------
+DebuggerJMCStepper::DebuggerJMCStepper(Thread *thread,
+ CorDebugUnmappedStop rgfMappingStop,
+ CorDebugIntercept interceptStop,
+ AppDomain *appDomain) :
+ DebuggerStepper(thread, rgfMappingStop, interceptStop, appDomain)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DJMCStepper ctor, this=%p\n", this));
+}
+
+DebuggerJMCStepper::~DebuggerJMCStepper()
+{
+ LOG((LF_CORDB, LL_INFO10000, "DJMCStepper dtor, this=%p\n", this));
+}
+
+// If we're a JMC stepper, then don't stop in non-user code.
+bool DebuggerJMCStepper::IsInterestingFrame(FrameInfo * pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DebuggerMethodInfo *pInfo = pFrame->GetMethodInfoFromFrameOrThrow();
+ _ASSERTE(pInfo != NULL); // throws on failure
+
+ bool fIsUserCode = pInfo->IsJMCFunction();
+
+
+ LOG((LF_CORDB, LL_INFO1000000, "DS::TSO, frame '%s::%s' is '%s' code\n",
+ pFrame->DbgGetClassName(), pFrame->DbgGetMethodName(),
+ fIsUserCode ? "user" : "non-user"));
+
+ return fIsUserCode;
+}
+
+// A JMC stepper's step-next stops at the next piece of user code that runs.
+// This may be a Step-Out, or any user code called before that.
+// e.g. for the call chain A1 -> B1 -> { A2, B2 -> B3 -> A3 }, where the A*
+// frames are user (JMC) code and the B* frames are non-user code,
+// TrapStepNext at the end of A2 should land us in A3.
+void DebuggerJMCStepper::TrapStepNext(ControllerStackInfo *info)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TrapStepNext, this=%p\n", this));
+ EnableMethodEnter();
+
+ // This will place a patch up the stack and set m_reason = STEP_RETURN.
+ // If we end up hitting JMC before that patch, we'll hit TriggerMethodEnter
+ // and that will set our reason to STEP_CALL.
+ TrapStepOut(info);
+}
+
+// ip - target address for call instruction
+bool DebuggerJMCStepper::TrapStepInHelper(
+ ControllerStackInfo * pInfo,
+ const BYTE * ipCallTarget,
+ const BYTE * ipNext,
+ bool fCallingIntoFunclet)
+{
+#ifndef WIN64EXCEPTIONS
+ // There are no funclets on x86.
+ _ASSERTE(!fCallingIntoFunclet);
+#endif
+
+ // If we are calling into a funclet, then we can't rely on the JMC probe to stop us because there are no
+ // JMC probes in funclets. Instead, we have to perform a traditional step-in here.
+ if (fCallingIntoFunclet)
+ {
+ TraceDestination td;
+ td.InitForManaged(reinterpret_cast<PCODE>(ipCallTarget));
+ PatchTrace(&td, LEAF_MOST_FRAME, false);
+
+ // If this succeeds, then we still need to put a patch at the return address. This is done below.
+ // If this fails, then we definitely need to put a patch at the return address to trap the thread.
+ // So in either case, we have to execute the rest of this function.
+ }
+
+ MethodDesc * pDesc = pInfo->m_activeFrame.md;
+ DebuggerJitInfo *dji = NULL;
+
+ // We may not have a DJI if we're in an attach case. We should still be able to do a JMC-step in though.
+ // So NULL is ok here.
+ dji = g_pDebugger->GetJitInfo(pDesc, (const BYTE*) ipNext);
+
+
+ // Place patch after call, which is at ipNext. Note we don't need an IL->Native map here
+ // since we disassembled native code to find the ip after the call.
+ SIZE_T offset = CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ipNext);
+
+
+ LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TSIH, at '%s::%s', calling=0x%p, next=0x%p, offset=%d\n",
+ pDesc->m_pszDebugClassName,
+ pDesc->m_pszDebugMethodName,
+ ipCallTarget, ipNext,
+ offset));
+
+ // Place a patch at the native address (inside the managed method).
+ AddBindAndActivateNativeManagedPatch(pInfo->m_activeFrame.md,
+ dji,
+ offset,
+ pInfo->m_returnFrame.fp,
+ NULL);
+
+ EnableMethodEnter();
+
+ // Return true means that we want to let the stepper run free. It will either
+ // hit the patch after the call instruction or it will hit a TriggerMethodEnter.
+ return true;
+}
+
+// For JMC-steppers, we don't enable trace-call; we enable Method-Enter.
+void DebuggerJMCStepper::EnablePolyTraceCall()
+{
+ _ASSERTE(!IsFrozen());
+
+ this->EnableMethodEnter();
+}
+
+// Return true if this is non-user code. This means we've set up the proper patches &
+// triggers, etc. and so we expect the controller to just run free.
+// This is called when all other stepping criteria are met and we're about to
+// send a step-complete. For JMC, this is when we see if we're in non-user code
+// and if so, continue stepping instead of sending the step-complete.
+// Return false if this is user-code.
+bool DebuggerJMCStepper::DetectHandleNonUserCode(ControllerStackInfo *pInfo, DebuggerMethodInfo * dmi)
+{
+ _ASSERTE(dmi != NULL);
+ bool fIsUserCode = dmi->IsJMCFunction();
+
+ if (!fIsUserCode)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "JMC stepper stopped in non-user code, continuing.\n"));
+        // Non-user code; we want to skip through this.
+
+ // We may be here while trying to step-out.
+ // Step-out just means stop at the first interesting frame above us.
+ // So JMC TrapStepOut won't patch a non-user frame.
+        // But if we're skipping over other stuff (prolog, epilog, interceptors,
+        // trace calls), then we may still be in the middle of non-user code.
+ //_ASSERTE(m_eMode != cStepOut);
+
+ if (m_eMode == cStepOut)
+ {
+ TrapStepOut(pInfo);
+ }
+ else if (m_stepIn)
+ {
+ EnableMethodEnter();
+ TrapStepOut(pInfo);
+            // Run until we hit the next piece of managed code.
+ } else {
+ // Do a traditional step-out since we just want to go up 1 frame.
+ TrapStepOut(pInfo, true); // force trad step out.
+
+
+            // If we're not in the original frame anymore:
+            // if we did a Step-over at the end of a method, and that did a single-step over the return,
+            // then we may already be in our parent frame. In that case, we also want to behave
+            // like a step-in and TriggerMethodEnter.
+ if (this->m_fp != pInfo->m_activeFrame.fp)
+ {
+ // If we're a step-over, then we should only be stopped in a parent frame.
+ _ASSERTE(m_stepIn || IsCloserToLeaf(this->m_fp, pInfo->m_activeFrame.fp));
+ EnableMethodEnter();
+ }
+
+            // Step-over shouldn't stop in a frame below us in the same callstack.
+            // So we do a traditional step-out of our current frame, which guarantees
+            // that. After that, we act just like a step-in.
+ m_stepIn = true;
+ }
+ EnableUnwind(m_fp);
+
+ // Must keep going...
+ return true;
+ }
+
+ return false;
+}
+
+// Dispatched right after the prolog of a JMC function.
+// We may be blocking the GC here, so let's be fast!
+void DebuggerJMCStepper::TriggerMethodEnter(Thread * thread,
+ DebuggerJitInfo *dji,
+ const BYTE * ip,
+ FramePointer fp)
+{
+ _ASSERTE(dji != NULL);
+ _ASSERTE(thread != NULL);
+ _ASSERTE(ip != NULL);
+
+ _ASSERTE(!IsFrozen());
+
+ MethodDesc * pDesc = dji->m_fd;
+ LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, desc=%p, addr=%p\n",
+ pDesc, ip));
+
+ // JMC steppers won't stop in Lightweight delegates. Just return & keep executing.
+ if (pDesc->IsNoMetadata())
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, skipping b/c it's lw-codegen\n"));
+ return;
+ }
+
+ // Is this user code?
+ DebuggerMethodInfo * dmi = dji->m_methodInfo;
+ bool fIsUserCode = dmi->IsJMCFunction();
+
+
+ LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, '%s::%s' is '%s' code\n",
+ pDesc->m_pszDebugClassName,
+ pDesc->m_pszDebugMethodName,
+ fIsUserCode ? "user" : "non-user"
+ ));
+
+ // If this isn't user code, then just return and continue executing.
+ if (!fIsUserCode)
+ return;
+
+ // MethodEnter is only enabled when we want to stop in a JMC function.
+ // And that's where we are now. So patch the ip and resume.
+ // The stepper will hit the patch, and stop.
+
+    // It's a good thing we have the fp passed in, because we have no other
+    // way of getting it. We can't do a stack trace here (the stack trace
+    // would start at the last pushed Frame, which would miss a lot of managed
+    // frames).
+
+ // Don't bind to a particular AppDomain so that we can do a Cross-Appdomain step.
+ AddBindAndActivateNativeManagedPatch(pDesc,
+ dji,
+ CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ip),
+ fp,
+ NULL // AppDomain
+ );
+
+ LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, after setting patch to stop\n"));
+
+ // Once we resume, we'll go hit that patch (duh, we patched our return address)
+ // Furthermore, we know the step will complete with reason = call, so set that now.
+ m_reason = STEP_CALL;
+}
+
+
+
+//-----------------------------------------------------------------------------
+// Helper to convert from an EE Frame's interception enum to a CorDebugIntercept
+// bitfield.
+// The intercept value in an EE Frame is a 0-based enumeration (not a bitfield).
+// The intercept value for ICorDebug is a bitfield.
+//-----------------------------------------------------------------------------
+CorDebugIntercept ConvertFrameBitsToDbg(Frame::Interception i)
+{
+ _ASSERTE(i >= 0 && i < Frame::INTERCEPTION_COUNT);
+
+ // Since the ee frame is a 0-based enum, we can just use a map.
+ const CorDebugIntercept map[Frame::INTERCEPTION_COUNT] =
+ {
+ // ICorDebug EE Frame
+ INTERCEPT_NONE, // INTERCEPTION_NONE,
+ INTERCEPT_CLASS_INIT, // INTERCEPTION_CLASS_INIT
+ INTERCEPT_EXCEPTION_FILTER, // INTERCEPTION_EXCEPTION
+ INTERCEPT_CONTEXT_POLICY, // INTERCEPTION_CONTEXT
+ INTERCEPT_SECURITY, // INTERCEPTION_SECURITY
+ INTERCEPT_INTERCEPTION, // INTERCEPTION_OTHER
+ };
+
+ return map[i];
+}
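+// For example, ConvertFrameBitsToDbg(Frame::INTERCEPTION_CLASS_INIT) yields
+// INTERCEPT_CLASS_INIT, and individual results can be OR'd together into a
+// CorDebugIntercept mask (as InterceptorStackInfo::WalkStack does below).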
+
+//-----------------------------------------------------------------------------
+// This is a helper class to do a stack walk over a certain range and find all the interceptors.
+// This allows a JMC stepper to see if there are any interceptors it wants to skip over (though
+// there's nothing JMC-specific about this).
+// Note that we only want to walk the stack range that the stepper is operating in.
+// That's because we don't care about interceptors that happened _before_ the
+// stepper was created.
+//-----------------------------------------------------------------------------
+class InterceptorStackInfo
+{
+public:
+#ifdef _DEBUG
+ InterceptorStackInfo()
+ {
+ // since this ctor just nulls out fpTop (which is already done in Init), we
+ // only need it in debug.
+ m_fpTop = LEAF_MOST_FRAME;
+ }
+#endif
+
+ // Get a CorDebugIntercept bitfield that contains a bit for each type of interceptor
+ // if that interceptor is present within our stack-range.
+ // Stack range is from leaf-most up to and including fp
+ CorDebugIntercept GetInterceptorsInRange()
+ {
+ _ASSERTE(m_fpTop != LEAF_MOST_FRAME || !"Must call Init first");
+ return (CorDebugIntercept) m_bits;
+ }
+
+ // Prime the stackwalk.
+ void Init(FramePointer fpTop, Thread *thread, CONTEXT *pContext, BOOL contextValid)
+ {
+ _ASSERTE(fpTop != LEAF_MOST_FRAME);
+ _ASSERTE(thread != NULL);
+
+ m_bits = 0;
+ m_fpTop = fpTop;
+
+ LOG((LF_CORDB,LL_EVERYTHING, "ISI::Init - fpTop=%p, thread=%p, pContext=%p, contextValid=%d\n",
+ fpTop.GetSPValue(), thread, pContext, contextValid));
+
+ int result;
+ result = DebuggerWalkStack(
+ thread,
+ LEAF_MOST_FRAME,
+ pContext,
+ contextValid,
+ WalkStack,
+ (void *) this,
+ FALSE
+ );
+ }
+
+
+protected:
+ // This is a bitfield of all the interceptors we encounter in our stack-range
+ int m_bits;
+
+ // This is the top of our stack range.
+ FramePointer m_fpTop;
+
+ static StackWalkAction WalkStack(FrameInfo *pInfo, void *data)
+ {
+ _ASSERTE(pInfo != NULL);
+ _ASSERTE(data != NULL);
+ InterceptorStackInfo * pThis = (InterceptorStackInfo*) data;
+
+ // If there's an interceptor frame here, then set those
+ // bits in our bitfield.
+ Frame::Interception i = Frame::INTERCEPTION_NONE;
+ Frame * pFrame = pInfo->frame;
+ if ((pFrame != NULL) && (pFrame != FRAME_TOP))
+ {
+ i = pFrame->GetInterception();
+ if (i != Frame::INTERCEPTION_NONE)
+ {
+ pThis->m_bits |= (int) ConvertFrameBitsToDbg(i);
+ }
+ }
+ else if (pInfo->HasMethodFrame())
+ {
+ // Check whether we are executing in a class constructor.
+ _ASSERTE(pInfo->md != NULL);
+
+ // Need to be careful about an off-by-one error here! Imagine your stack looks like:
+ // Foo.DoSomething()
+ // Foo..cctor <--- step starts/ends in here
+ // Bar.Bar();
+ //
+ // and your code looks like this:
+ // Foo..cctor()
+ // {
+ // Foo.DoSomething(); <-- JMC step started here
+ // int x = 1; <-- step ends here
+ // }
+ // This stackwalk covers the inclusive range [Foo..cctor, Foo.DoSomething()] so we will see
+ // the static cctor in this walk. However executing inside a static class constructor does not
+ // count as an interceptor. You must start the step outside the static constructor and then call
+ // into it to have an interceptor. Therefore only static constructors that aren't the outermost
+ // frame should be treated as interceptors.
+ if (pInfo->md->IsClassConstructor() && (pInfo->fp != pThis->m_fpTop))
+ {
+ // We called a class constructor, add the appropriate flag
+ pThis->m_bits |= (int) INTERCEPT_CLASS_INIT;
+ }
+ }
+ LOG((LF_CORDB,LL_EVERYTHING,"ISI::WS- Frame=%p, fp=%p, Frame bits=%x, Cor bits=0x%x\n", pInfo->frame, pInfo->fp.GetSPValue(), i, pThis->m_bits));
+
+
+ // We can stop once we hit the top frame.
+ if (pInfo->fp == pThis->m_fpTop)
+ {
+ return SWA_ABORT;
+ }
+ else
+ {
+ return SWA_CONTINUE;
+ }
+ }
+};
+
+
+
+
+// Skip interceptors for JMC steppers.
+// Return true if we patch something (and thus should keep stepping)
+// Return false if we're done.
+bool DebuggerJMCStepper::DetectHandleInterceptors(ControllerStackInfo * info)
+{
+ LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: Start DetectHandleInterceptors\n"));
+
+ // For JMC, we could stop very far way from an interceptor.
+ // So we have to do a stack walk to search for interceptors...
+ // If we find any in our stack range (from m_fp ... current fp), then we just do a trap-step-next.
+
+ // Note that this logic should also work for regular steppers, but we've left that in
+ // as to keep that code-path unchanged.
+
+ // ControllerStackInfo only gives us the bottom 2 frames on the stack, so we ignore it and
+ // have to do our own stack walk.
+
+ // @todo - for us to properly skip filters, we need to make sure that filters show up in our chains.
+
+
+ InterceptorStackInfo info2;
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(this->GetThread());
+ CONTEXT tempContext;
+
+ _ASSERTE(!ISREDIRECTEDTHREAD(this->GetThread()));
+
+ if (context == NULL)
+ {
+ info2.Init(this->m_fp, this->GetThread(), &tempContext, FALSE);
+ }
+ else
+ {
+ info2.Init(this->m_fp, this->GetThread(), context, TRUE);
+ }
+
+ // The following casts are safe on WIN64 platforms.
+ int iOnStack = (int) info2.GetInterceptorsInRange();
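+    // m_rgfInterceptStop has a bit set for each interceptor kind the user wants
+    // to stop in, so its complement is the set of interceptors to skip over.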
+ int iSkip = ~((int) m_rgfInterceptStop);
+
+ LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: iOnStack=%x, iSkip=%x\n", iOnStack, iSkip));
+
+ // If the bits on the stack contain any interceptors we want to skip, then we need to keep going.
+ if ((iOnStack & iSkip) != 0)
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: keep going!\n"));
+ TrapStepNext(info);
+ EnableUnwind(m_fp);
+ return true;
+ }
+
+ LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: Done!!\n"));
+ return false;
+}
+
+
+// * ------------------------------------------------------------------------
+// * DebuggerThreadStarter routines
+// * ------------------------------------------------------------------------
+
+DebuggerThreadStarter::DebuggerThreadStarter(Thread *thread)
+ : DebuggerController(thread, NULL)
+{
+ LOG((LF_CORDB, LL_INFO1000, "DTS::DTS: this:0x%x Thread:0x%x\n",
+ this, thread));
+
+ // Check to make sure we only have 1 ThreadStarter on a given thread. (Inspired by NDPWhidbey issue 16888)
+#if defined(_DEBUG)
+ EnsureUniqueThreadStarter(this);
+#endif
+}
+
+// TP_RESULT DebuggerThreadStarter::TriggerPatch() If we're in a
+// stub (module==NULL&&managed) then do a PatchTrace up the stack &
+// return false. Otherwise DisableAll & return
+// true
+TP_RESULT DebuggerThreadStarter::TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+ Module *module = patch->key.module;
+ BOOL managed = patch->IsManagedPatch();
+
+ LOG((LF_CORDB,LL_INFO1000, "DebuggerThreadStarter::TriggerPatch for thread 0x%x\n", Debugger::GetThreadIdHelper(thread)));
+
+ if (module == NULL && managed)
+ {
+ // This is a stub patch. If it was a TRACE_FRAME_PUSH that got us here, then the stub's frame is pushed now, so
+ // we tell the frame to apply the real patch. If we got here via a TRACE_MGR_PUSH, however, then there is no
+ // frame and we go back to the stub manager that generated the stub for where to patch next.
+ TraceDestination trace;
+ bool traceOk;
+ if (patch->trace.GetTraceType() == TRACE_MGR_PUSH)
+ {
+ BYTE *dummy = NULL;
+ CONTEXT *context = GetManagedLiveCtx(thread);
+ CONTRACT_VIOLATION(GCViolation);
+ traceOk = g_pEEInterface->TraceManager(thread, patch->trace.GetStubManager(), &trace, context, &dummy);
+ }
+ else if ((patch->trace.GetTraceType() == TRACE_FRAME_PUSH) && (thread->GetFrame()->IsTransitionToNativeFrame()))
+ {
+ // If we've got a frame that is transitioning to native, there's no reason to try to keep tracing. So we
+ // bail early and save ourselves some effort. This also works around a problem where we deadlock trying to
+ // do too much work to determine the destination of a ComPlusMethodFrame. (See issue 87103.)
+ //
+ // Note: trace call is still enabled, so we can just ignore this patch and wait for trace call to fire
+ // again...
+ return TPR_IGNORE;
+ }
+ else
+ {
+ // It's questionable whether Trace_Frame_Push is actually safe or not.
+ ControllerStackInfo csi;
+ StackTraceTicket ticket(patch);
+ csi.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);
+
+ CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers
+ traceOk = g_pEEInterface->TraceFrame(thread, thread->GetFrame(), TRUE, &trace, &(csi.m_activeFrame.registers));
+ }
+
+ if (traceOk && g_pEEInterface->FollowTrace(&trace))
+ {
+ PatchTrace(&trace, LEAF_MOST_FRAME, TRUE);
+ }
+
+ return TPR_IGNORE;
+ }
+ else
+ {
+ // We've hit user code; trigger our event.
+ DisableAll();
+
+
+ {
+
+ // Give the helper thread a chance to get ready. The temporary helper can't handle
+ // execution control well, and the RS won't do any execution control until it gets a
+ // create Thread event, which it won't get until here.
+ // So now's our best time to wait for the real helper thread.
+ g_pDebugger->PollWaitingForHelper();
+ }
+
+ return TPR_TRIGGER;
+ }
+}
+
+void DebuggerThreadStarter::TriggerTraceCall(Thread *thread, const BYTE *ip)
+{
+ LOG((LF_CORDB, LL_EVERYTHING, "DTS::TTC called\n"));
+#ifdef DEBUGGING_SUPPORTED
+ if (thread->GetDomain()->IsDebuggerAttached())
+ {
+ TraceDestination trace;
+
+ if (g_pEEInterface->TraceStub(ip, &trace) && g_pEEInterface->FollowTrace(&trace))
+ {
+ PatchTrace(&trace, LEAF_MOST_FRAME, true);
+ }
+ }
+#endif //DEBUGGING_SUPPORTED
+
+}
+
+bool DebuggerThreadStarter::SendEvent(Thread *thread, bool fIpChanged)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ SENDEVENT_CONTRACT_ITEMS;
+ }
+ CONTRACTL_END;
+
+    // This SendEvent can't be interrupted by a SetIp because until the client
+ // gets a ThreadStarter event, it doesn't even know the thread exists, so
+ // it certainly can't change its ip.
+ _ASSERTE(!fIpChanged);
+
+ LOG((LF_CORDB, LL_INFO10000, "DTS::SE: in DebuggerThreadStarter's SendEvent\n"));
+
+ // Send the thread started event.
+ g_pDebugger->ThreadStarted(thread);
+
+    // We delete this now because it's no longer needed. We can call
+    // delete here because the queued count is above 0. This object
+    // will really be deleted when it's dequeued shortly after this
+    // call returns.
+ Delete();
+
+ return true;
+}
+
+// * ------------------------------------------------------------------------
+// * DebuggerUserBreakpoint routines
+// * ------------------------------------------------------------------------
+
+bool DebuggerUserBreakpoint::IsFrameInDebuggerNamespace(FrameInfo * pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+    // Steppers ignore internal frames, so this should only be called on real frames.
+ _ASSERTE(pFrame->HasMethodFrame());
+
+ // Now get the namespace of the active frame
+ MethodDesc *pMD = pFrame->md;
+
+ if (pMD != NULL)
+ {
+ MethodTable * pMT = pMD->GetMethodTable();
+
+ LPCUTF8 szNamespace = NULL;
+ LPCUTF8 szClassName = pMT->GetFullyQualifiedNameInfo(&szNamespace);
+
+ if (szClassName != NULL && szNamespace != NULL)
+ {
+ MAKE_WIDEPTR_FROMUTF8(wszNamespace, szNamespace); // throw
+ MAKE_WIDEPTR_FROMUTF8(wszClassName, szClassName);
+ if (wcscmp(wszClassName, W("Debugger")) == 0 &&
+ wcscmp(wszNamespace, W("System.Diagnostics")) == 0)
+ {
+ // This will continue stepping
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+// Helper to check if we're directly in a dynamic method (ignoring any chain goo
+// or stuff in the Debugger namespace).
+class IsLeafFrameDynamic
+{
+protected:
+ static StackWalkAction WalkStackWrapper(FrameInfo *pInfo, void *data)
+ {
+ IsLeafFrameDynamic * pThis = reinterpret_cast<IsLeafFrameDynamic*> (data);
+ return pThis->WalkStack(pInfo);
+ }
+
+ StackWalkAction WalkStack(FrameInfo *pInfo)
+ {
+ _ASSERTE(pInfo != NULL);
+
+ // A FrameInfo may have both Method + Chain rolled into one.
+ if (!pInfo->HasMethodFrame() && !pInfo->HasStubFrame())
+ {
+ // We're a chain. Ignore it and keep looking.
+ return SWA_CONTINUE;
+ }
+
+ // So now this is the first non-chain, non-Debugger namespace frame.
+ // LW frames don't have a name, so we check if it's LW first.
+ if (pInfo->eStubFrameType == STUBFRAME_LIGHTWEIGHT_FUNCTION)
+ {
+ m_fInLightWeightMethod = true;
+ return SWA_ABORT;
+ }
+
+ // Ignore Debugger.Break() frames.
+ // All Debugger.Break calls will have this on the stack.
+ if (DebuggerUserBreakpoint::IsFrameInDebuggerNamespace(pInfo))
+ {
+ return SWA_CONTINUE;
+ }
+
+        // We've now determined the leafmost interesting frame, so stop stackwalking.
+ _ASSERTE(m_fInLightWeightMethod == false);
+ return SWA_ABORT;
+ }
+
+
+ bool m_fInLightWeightMethod;
+
+ // Need this context to do stack trace.
+ CONTEXT m_tempContext;
+
+public:
+    // Returns true if the leafmost non-chain, non-Debugger-namespace frame for the
+    // current thread is a lightweight (dynamic) method; returns false otherwise.
+ bool DoCheck(IN Thread * pThread)
+ {
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ m_fInLightWeightMethod = false;
+
+
+ DebuggerWalkStack(
+ pThread,
+ LEAF_MOST_FRAME,
+ &m_tempContext, false,
+ WalkStackWrapper,
+ (void *) this,
+ TRUE // includes everything
+ );
+
+ // We don't care whether the stackwalk succeeds or not because the
+ // callback sets our status via this field either way, so just return it.
+ return m_fInLightWeightMethod;
+ };
+};
+
+// Handle a Debug.Break() notification.
+// This may create a controller to step out of the Debug.Break() call (so that
+// we appear stopped at the callsite).
+// If we can't step-out (eg, we're directly in a dynamic method), then send
+// the debug event immediately.
+void DebuggerUserBreakpoint::HandleDebugBreak(Thread * pThread)
+{
+ bool fDoStepOut = true;
+
+ // If the leaf frame is not a LW method, then step-out.
+ IsLeafFrameDynamic info;
+ fDoStepOut = !info.DoCheck(pThread);
+
+ if (fDoStepOut)
+ {
+ // Create a controller that will step out for us.
+ new (interopsafe) DebuggerUserBreakpoint(pThread);
+ }
+ else
+ {
+ // Send debug event immediately.
+ g_pDebugger->SendUserBreakpointAndSynchronize(pThread);
+ }
+}
+
+
+DebuggerUserBreakpoint::DebuggerUserBreakpoint(Thread *thread)
+ : DebuggerStepper(thread, (CorDebugUnmappedStop) (STOP_ALL & ~STOP_UNMANAGED), INTERCEPT_ALL, NULL)
+{
+ // Setup a step out from the current frame (which we know is
+ // unmanaged, actually...)
+
+
+    // This happens to be safe, but it's a very special case (so we have a special case ticket).
+    // This is called while we're live (so no filter context) and from the fcall,
+    // and we pushed a HelperMethodFrame to protect us. We also happen to know that we haven't
+    // done anything illegal or dangerous since then.
+
+ StackTraceTicket ticket(this);
+ StepOut(LEAF_MOST_FRAME, ticket);
+}
+
+
+// Is this frame interesting?
+// Use this to skip all code in the System.Diagnostics.Debugger class.
+bool DebuggerUserBreakpoint::IsInterestingFrame(FrameInfo * pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MODE_ANY;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ return !IsFrameInDebuggerNamespace(pFrame);
+}
+
+bool DebuggerUserBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ SENDEVENT_CONTRACT_ITEMS;
+ }
+ CONTRACTL_END;
+
+ // See DebuggerStepper::SendEvent for why we assert here.
+ // This is technically an issue, but it's too benign to fix.
+ _ASSERTE(!fIpChanged);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DUB::SE: in DebuggerUserBreakpoint's SendEvent\n"));
+
+ // Send the user breakpoint event.
+ g_pDebugger->SendRawUserBreakpoint(thread);
+
+    // We delete this now because it's no longer needed. We can call
+    // delete here because the queued count is above 0. This object
+    // will really be deleted when it's dequeued shortly after this
+    // call returns.
+ Delete();
+
+ return true;
+}
+
+// * ------------------------------------------------------------------------
+// * DebuggerFuncEvalComplete routines
+// * ------------------------------------------------------------------------
+
+DebuggerFuncEvalComplete::DebuggerFuncEvalComplete(Thread *thread,
+ void *dest)
+ : DebuggerController(thread, NULL)
+{
+#ifdef _TARGET_ARM_
+ m_pDE = reinterpret_cast<DebuggerEval*>(((DWORD)dest) & ~THUMB_CODE);
+#else
+ m_pDE = reinterpret_cast<DebuggerEval*>(dest);
+#endif
+
+ // Add an unmanaged patch at the destination.
+ AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE*)dest, LEAF_MOST_FRAME, FALSE, TRACE_UNMANAGED);
+}
+
+TP_RESULT DebuggerFuncEvalComplete::TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+
+ // It had better be an unmanaged patch...
+ _ASSERTE((patch->key.module == NULL) && !patch->IsManagedPatch());
+
+    // Set the ThreadFilterContext back here because we need to make the stack
+    // crawlable in case a GC gets triggered.
+
+ // Restore the thread's context to what it was before we hijacked it for this func eval.
+ CONTEXT *pCtx = GetManagedLiveCtx(thread);
+ CORDbgCopyThreadContext(reinterpret_cast<DT_CONTEXT *>(pCtx),
+ reinterpret_cast<DT_CONTEXT *>(&(m_pDE->m_context)));
+
+ // We've hit our patch, so simply disable all (which removes the
+ // patch) and trigger the event.
+ DisableAll();
+ return TPR_TRIGGER;
+}
+
+bool DebuggerFuncEvalComplete::SendEvent(Thread *thread, bool fIpChanged)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ SENDEVENT_CONTRACT_ITEMS;
+ }
+ CONTRACTL_END;
+
+
+    // This should not ever be interrupted by a SetIp.
+    // The BP will be off in random native code for which SetIp would be illegal.
+    // However, the func-eval controller will restore the context from when we're at the patch,
+    // so that will look like the IP changed on us.
+ _ASSERTE(fIpChanged);
+
+ LOG((LF_CORDB, LL_INFO10000, "DFEC::SE: in DebuggerFuncEval's SendEvent\n"));
+
+ _ASSERTE(!ISREDIRECTEDTHREAD(thread));
+
+ // The DebuggerEval is at our faulting address.
+ DebuggerEval *pDE = m_pDE;
+
+ // Send the func eval complete (or exception) event.
+ g_pDebugger->FuncEvalComplete(thread, pDE);
+
+    // We delete this now because it's no longer needed. We can call
+    // delete here because the queued count is above 0. This object
+    // will really be deleted when it's dequeued shortly after this
+ // call returns.
+ Delete();
+
+ return true;
+}
+
+#ifdef EnC_SUPPORTED
+
+// * ------------------------------------------------------------------------ *
+// * DebuggerEnCBreakpoint routines
+// * ------------------------------------------------------------------------ *
+
+//---------------------------------------------------------------------------------------
+//
+// DebuggerEnCBreakpoint constructor - creates and activates a new EnC breakpoint
+//
+// Arguments:
+// offset - native offset in the function to place the patch
+// jitInfo - identifies the function in which the breakpoint is being placed
+// fTriggerType - breakpoint type: either REMAP_PENDING or REMAP_COMPLETE
+// pAppDomain - the breakpoint applies to the specified AppDomain only
+//
+
+DebuggerEnCBreakpoint::DebuggerEnCBreakpoint(SIZE_T offset,
+ DebuggerJitInfo *jitInfo,
+ DebuggerEnCBreakpoint::TriggerType fTriggerType,
+ AppDomain *pAppDomain)
+ : DebuggerController(NULL, pAppDomain),
+ m_fTriggerType(fTriggerType),
+ m_jitInfo(jitInfo)
+{
+ _ASSERTE( jitInfo != NULL );
+ // Add and activate the specified patch
+ AddBindAndActivateNativeManagedPatch(jitInfo->m_fd, jitInfo, offset, LEAF_MOST_FRAME, pAppDomain);
+ LOG((LF_ENC,LL_INFO1000, "DEnCBPDEnCBP::adding %S patch!\n",
+ fTriggerType == REMAP_PENDING ? W("remap pending") : W("remap complete")));
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// DebuggerEnCBreakpoint::TriggerPatch
+// called by the debugging infrastructure when the patch is hit.
+//
+// Arguments:
+// patch - specifies the patch that was hit
+// thread - identifies the thread on which the patch was hit
+// tyWhy - TY_SHORT_CIRCUIT for normal REMAP_PENDING EnC patches
+//
+// Return value:
+// TPR_IGNORE if the debugger chooses not to take a remap opportunity
+// TPR_IGNORE_AND_STOP when a remap-complete event is sent
+// Doesn't return at all if the debugger remaps execution to the new version of the method
+//
+TP_RESULT DebuggerEnCBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+ _ASSERTE(HasLock());
+
+ Module *module = patch->key.module;
+ mdMethodDef md = patch->key.md;
+ SIZE_T offset = patch->offset;
+
+ // Map the current native offset back to the IL offset in the old
+ // function. This will be mapped to the new native offset within
+ // ResumeInUpdatedFunction
+ CorDebugMappingResult map;
+ DWORD which;
+ SIZE_T currentIP = (SIZE_T)m_jitInfo->MapNativeOffsetToIL(offset,
+ &map, &which);
+
+ // We only lay DebuggerEnCBreakpoints at sequence points
+ _ASSERTE(map == MAPPING_EXACT);
+
+ LOG((LF_ENC, LL_ALWAYS,
+ "DEnCBP::TP: triggered E&C %S breakpoint: tid=0x%x, module=0x%08x, "
+ "method def=0x%08x, version=%d, native offset=0x%x, IL offset=0x%x\n this=0x%x\n",
+ m_fTriggerType == REMAP_PENDING ? W("ResumePending") : W("ResumeComplete"),
+ thread, module, md, m_jitInfo->m_encVersion, offset, currentIP, this));
+
+ // If this is a REMAP_COMPLETE patch, then dispatch the RemapComplete callback
+ if (m_fTriggerType == REMAP_COMPLETE)
+ {
+ return HandleRemapComplete(patch, thread, tyWhy);
+ }
+
+ // This must be a REMAP_PENDING patch
+ // unless we got here on an explicit short-circuit, don't do any work
+ if (tyWhy != TY_SHORT_CIRCUIT)
+ {
+ LOG((LF_ENC, LL_ALWAYS, "DEnCBP::TP: not short-circuit ... bailing\n"));
+ return TPR_IGNORE;
+ }
+
+ _ASSERTE(patch->IsManagedPatch());
+
+ // Grab the MethodDesc for this function.
+ _ASSERTE(module != NULL);
+
+ // GENERICS: @todo generics. This should be replaced by a similar loop
+ // over the DJIs for the DMI as in BindPatch up above.
+ MethodDesc *pFD = g_pEEInterface->FindLoadedMethodRefOrDef(module, md);
+
+ _ASSERTE(pFD != NULL);
+
+ LOG((LF_ENC, LL_ALWAYS,
+ "DEnCBP::TP: in %s::%s\n", pFD->m_pszDebugClassName,pFD->m_pszDebugMethodName));
+
+ // Grab the jit info for the original copy of the method, which is
+ // what we are executing right now.
+ DebuggerJitInfo *pJitInfo = m_jitInfo;
+ _ASSERTE(pJitInfo);
+ _ASSERTE(pJitInfo->m_fd == pFD);
+
+ // Grab the context for this thread. This is the context that was
+ // passed to COMPlusFrameHandler.
+ CONTEXT *pContext = GetManagedLiveCtx(thread);
+
+ // We use the module the current function is in.
+ _ASSERTE(module->IsEditAndContinueEnabled());
+ EditAndContinueModule *pModule = (EditAndContinueModule*)module;
+
+ // Release the controller lock for the rest of this method
+ CrstBase::UnsafeCrstInverseHolder inverseLock(&g_criticalSection);
+
+ // resumeIP is the native offset in the new version of the method the debugger wants
+ // to resume to. We'll pass the address of this variable over to the right-side
+ // and if it modifies the contents while we're stopped dispatching the RemapOpportunity,
+ // then we know it wants a remap.
+ // This form of side-channel communication seems like an error-prone workaround. Ideally the
+ // remap IP (if any) would just be returned in a response event.
+ SIZE_T resumeIP = (SIZE_T) -1;
+
+ // Debugging code to enable a break after N RemapOpportunities
+#ifdef _DEBUG
+ static int breakOnRemapOpportunity = -1;
+ if (breakOnRemapOpportunity == -1)
+ breakOnRemapOpportunity = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCBreakOnRemapOpportunity);
+
+ static int remapOpportunityCount = 0;
+
+ ++remapOpportunityCount;
+ if (breakOnRemapOpportunity == 1 || breakOnRemapOpportunity == remapOpportunityCount)
+ {
+ _ASSERTE(!"BreakOnRemapOpportunity");
+ }
+#endif
+
+ // Send an event to the RS to call the RemapOpportunity callback, passing the address of resumeIP.
+ // If the debugger responds with a call to RemapFunction, the supplied IP will be copied into resumeIP
+ // and we will know to update the context and resume the function at the new IP. Otherwise we just do
+ // nothing and try again on next RemapFunction breakpoint
+ g_pDebugger->LockAndSendEnCRemapEvent(pJitInfo, currentIP, &resumeIP);
+
+ LOG((LF_ENC, LL_ALWAYS,
+ "DEnCBP::TP: resume IL offset is 0x%x\n", resumeIP));
+
+ // Has the debugger requested a remap?
+ if (resumeIP != (SIZE_T) -1)
+ {
+ // This will jit the function, update the context, and resume execution at the new location.
+ g_pEEInterface->ResumeInUpdatedFunction(pModule,
+ pFD,
+ (void*)pJitInfo,
+ resumeIP,
+ pContext);
+ _ASSERTE(!"Returned from ResumeInUpdatedFunction!");
+ }
+
+ LOG((LF_CORDB, LL_ALWAYS, "DEnCB::TP: We've returned from ResumeInUpd"
+ "atedFunction, we're going to skip the EnC patch ####\n"));
+
+    // When we return, we'll have to re-acquire this lock. Be careful that we haven't kept any controllers/patches
+    // in the caller. They can move when we unlock, so when we release the lock and re-acquire it here, things might
+    // have changed underneath us.
+    // The inverseLock holder will reacquire the lock.
+
+ return TPR_IGNORE;
+}
+
+//
+// HandleRemapComplete is called for an EnC patch in the newly updated function
+// so that we can notify the debugger that the remap has completed and it can
+// now remap its steppers or anything else that depends on the new code actually
+// being on the stack. We return TPR_IGNORE_AND_STOP because it's possible that the
+// function was edited again after we handled the remap complete, and we want to make
+// sure we start a fresh call to TriggerPatch.
+//
+TP_RESULT DebuggerEnCBreakpoint::HandleRemapComplete(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+ LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: HandleRemapComplete\n"));
+
+ // Debugging code to enable a break after N RemapCompletes
+#ifdef _DEBUG
+ static int breakOnRemapComplete = -1;
+ if (breakOnRemapComplete == -1)
+ breakOnRemapComplete = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCBreakOnRemapComplete);
+
+ static int remapCompleteCount = 0;
+ ++remapCompleteCount;
+ if (breakOnRemapComplete == 1 || breakOnRemapComplete == remapCompleteCount)
+ {
+ _ASSERTE(!"BreakOnRemapComplete");
+ }
+#endif
+ _ASSERTE(HasLock());
+
+
+ bool fApplied = m_jitInfo->m_encBreakpointsApplied;
+    // Need to delete this before the unlock below so that if any other thread comes in after
+    // the unlock, it won't handle this patch.
+ Delete();
+
+    // We just deleted ourselves. Can't access any instance members after this point.
+
+    // If the function has somehow been updated again before we resumed into it, then just bail.
+ if (fApplied)
+ {
+ LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: function already updated, ignoring\n"));
+ return TPR_IGNORE_AND_STOP;
+ }
+
+ // GENERICS: @todo generics. This should be replaced by a similar loop
+ // over the DJIs for the DMI as in BindPatch up above.
+ MethodDesc *pFD = g_pEEInterface->FindLoadedMethodRefOrDef(patch->key.module, patch->key.md);
+
+ LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: unlocking controller\n"));
+
+ // Unlock the controller lock and dispatch the remap complete event
+ CrstBase::UnsafeCrstInverseHolder inverseLock(&g_criticalSection);
+
+ LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: sending RemapCompleteEvent\n"));
+
+ g_pDebugger->LockAndSendEnCRemapCompleteEvent(pFD);
+
+    // When we return, we'll have to re-acquire this lock. Be careful that we haven't kept any controllers/patches
+    // in the caller. They can move when we unlock, so when we release the lock and re-acquire it here, things might
+    // have changed underneath us.
+    // The inverseLock holder will reacquire the lock.
+
+ return TPR_IGNORE_AND_STOP;
+}
+#endif //EnC_SUPPORTED
+
+// continuable-exceptions
+// * ------------------------------------------------------------------------ *
+// * DebuggerContinuableExceptionBreakpoint routines
+// * ------------------------------------------------------------------------ *
+
+
+//---------------------------------------------------------------------------------------
+//
+// constructor
+//
+// Arguments:
+// pThread - the thread on which we are intercepting an exception
+// nativeOffset - This is the target native offset. It is where we are going to resume execution.
+// jitInfo - the DebuggerJitInfo of the method at which we are intercepting
+// pAppDomain - the AppDomain in which the thread is executing
+//
+
+DebuggerContinuableExceptionBreakpoint::DebuggerContinuableExceptionBreakpoint(Thread *pThread,
+ SIZE_T nativeOffset,
+ DebuggerJitInfo *jitInfo,
+ AppDomain *pAppDomain)
+ : DebuggerController(pThread, pAppDomain)
+{
+ _ASSERTE( jitInfo != NULL );
+ // Add a native patch at the specified native offset, which is where we are going to resume execution.
+ AddBindAndActivateNativeManagedPatch(jitInfo->m_fd, jitInfo, nativeOffset, LEAF_MOST_FRAME, pAppDomain);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This function is called when the patch added in the constructor is hit. At this point,
+// we have already resumed execution, and the exception is no longer in flight.
+//
+// Arguments:
+// patch - the patch added in the constructor; unused
+// thread - the thread in question; unused
+// tyWhy - a flag which is only useful for EnC; unused
+//
+// Return Value:
+// This function always returns TPR_TRIGGER, meaning that it wants to send an event to notify the RS.
+//
+
+TP_RESULT DebuggerContinuableExceptionBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DCEBP::TP\n"));
+
+ //
+ // Disable the patch
+ //
+ DisableAll();
+
+ // We will send a notification to the RS when the patch is triggered.
+ return TPR_TRIGGER;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This function is called when we want to notify the RS that an interception is complete.
+// At this point, we have already resumed execution, and the exception is no longer in flight.
+//
+// Arguments:
+// thread - the thread in question
+// fIpChanged - whether the IP has changed by SetIP after the patch is hit but
+// before this function is called
+//
+
+bool DebuggerContinuableExceptionBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ SENDEVENT_CONTRACT_ITEMS;
+ }
+ CONTRACTL_END;
+
+
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "DCEBP::SE: in DebuggerContinuableExceptionBreakpoint's SendEvent\n"));
+
+ if (!fIpChanged)
+ {
+ g_pDebugger->SendInterceptExceptionComplete(thread);
+ }
+
+ // On WIN64, by the time we get here the DebuggerExState is gone already.
+ // ExceptionTrackers are cleaned up before we resume execution for a handled exception.
+#if !defined(WIN64EXCEPTIONS)
+ thread->GetExceptionState()->GetDebuggerState()->SetDebuggerInterceptContext(NULL);
+#endif // !WIN64EXCEPTIONS
+
+
+ //
+    // We delete this now because it's no longer needed. We can call
+    // delete here because the queued count is above 0. This object
+    // will really be deleted when it's dequeued shortly after this
+ // call returns.
+ //
+ Delete();
+
+ return true;
+}
+#endif // !DACCESS_COMPILE
diff --git a/src/debug/ee/controller.h b/src/debug/ee/controller.h
new file mode 100644
index 0000000000..0cfeb096fa
--- /dev/null
+++ b/src/debug/ee/controller.h
@@ -0,0 +1,1966 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: controller.h
+//
+
+//
+// Debugger control flow object
+//
+//*****************************************************************************
+
+#ifndef CONTROLLER_H_
+#define CONTROLLER_H_
+
+/* ========================================================================= */
+
+#if !defined(DACCESS_COMPILE)
+
+#include "frameinfo.h"
+
+/* ------------------------------------------------------------------------- *
+ * Forward declarations
+ * ------------------------------------------------------------------------- */
+
+class DebuggerPatchSkip;
+class DebuggerThreadStarter;
+class DebuggerController;
+class DebuggerControllerQueue;
+struct DebuggerControllerPatch;
+class DebuggerUserBreakpoint;
+class ControllerStackInfo;
+
+// Ticket for ensuring that it's safe to get a stack trace.
+class StackTraceTicket
+{
+public:
+    // Each ctor is a rule for why it's safe to run a stacktrace.
+
+ // Safe if we're at certain types of patches.
+ StackTraceTicket(DebuggerControllerPatch * patch);
+
+ // Safe if there was already another stack trace at this spot. (Grandfather clause)
+ StackTraceTicket(ControllerStackInfo * info);
+
+    // Safe if we're at a synchronized point.
+ StackTraceTicket(Thread * pThread);
+
+ // Safe b/c the context shows we're in native managed code
+ StackTraceTicket(const BYTE * ip);
+
+ // DebuggerUserBreakpoint has a special case of safety.
+ StackTraceTicket(DebuggerUserBreakpoint * p);
+
+ // This is like a contract violation.
+ // Unsafe tickets. Use as:
+ // StackTraceTicket ticket(StackTraceTicket::UNSAFE_TICKET);
+ enum EUNSAFE {
+ // Ticket is unsafe. Potential issue.
+ UNSAFE_TICKET = 0,
+
+ // For some wacky reason, it's safe to take a stacktrace here, but
+ // there's not an easily verifiable rule. Use this ticket very sparingly
+ // because it's much more difficult to verify.
+ SPECIAL_CASE_TICKET = 1
+ };
+ StackTraceTicket(EUNSAFE e) { };
+
+private:
+    // Tickets can't be copied around. Hide these definitions so as to enforce that.
+ // We still need the Copy ctor so that it can be passed in as a parameter.
+ void operator=(StackTraceTicket & other);
+};
+
+/* ------------------------------------------------------------------------- *
+ * ControllerStackInfo utility
+ * ------------------------------------------------------------------------- *
+ * class ControllerStackInfo is a class designed
+ * to simply obtain a two-frame stack trace: it will obtain the bottommost
+ * framepointer (m_bottomFP), a given target frame (m_activeFrame), and the
+ * frame above the target frame (m_returnFrame). Note that the target frame
+ * may be the bottommost, 'active' frame, or it may be a frame higher up in
+ * the stack. ControllerStackInfo accomplishes this by starting at the
+ * bottommost frame and walking upwards until it reaches the target frame,
+ * whereupon it records the m_activeFrame info, gets called once more to
+ * fill in the m_returnFrame info, and thereafter stops the stack walk.
+ *
+ * public:
+ * void * m_bottomFP: Frame pointer for the
+ * bottommost (most active)
+ * frame. We can add more later, if we need it. Currently just used in
+ * TrapStep. NULL indicates an uninitialized value.
+ *
+ * void * m_targetFP: The frame pointer to the frame
+ * that we actually want the info of.
+ *
+ * bool m_targetFrameFound: Set to true if
+ *      WalkStack finds the frame indicated by the targetFP handed to GetStackInfo;
+ *      false otherwise.
+ *
+ * FrameInfo m_activeFrame: A FrameInfo
+ * describing the target frame. This should always be valid after a
+ * call to GetStackInfo.
+ *
+ * FrameInfo m_returnFrame: A FrameInfo
+ *      describing the frame above the target frame, if the target's
+ *      return frame was found (call HasReturnFrame() to see if this is
+ * valid). Otherwise, this will be the same as m_activeFrame, above
+ *
+ * private:
+ * bool m_activeFound: Set to true if we found the target frame.
+ * bool m_returnFound: Set to true if we found the target's return frame.
+ */
+class ControllerStackInfo
+{
+public:
+ friend class StackTraceTicket;
+
+ ControllerStackInfo()
+ {
+ INDEBUG(m_dbgExecuted = false);
+ }
+
+ FramePointer m_bottomFP;
+ FramePointer m_targetFP;
+ bool m_targetFrameFound;
+
+ FrameInfo m_activeFrame;
+ FrameInfo m_returnFrame;
+
+ CorDebugChainReason m_specialChainReason;
+
+ // static StackWalkAction ControllerStackInfo::WalkStack() The
+ // callback that will be invoked by the DebuggerWalkStackProc.
+ // Note that the data argument is the "this" pointer to the
+ // ControllerStackInfo.
+ static StackWalkAction WalkStack(FrameInfo *pInfo, void *data);
+
+
+ //void ControllerStackInfo::GetStackInfo(): GetStackInfo
+ // is invoked by the user to trigger the stack walk. This will
+ // cause the stack walk detailed in the class description to happen.
+ // Thread* thread: The thread to do the stack walk on.
+ // void* targetFP: Can be either NULL (meaning that the bottommost
+ // frame is the target), or an frame pointer, meaning that the
+ // caller wants information about a specific frame.
+ // CONTEXT* pContext: A pointer to a CONTEXT structure. Can be null,
+ // we use our temp context.
+ // bool suppressUMChainFromComPlusMethodFrameGeneric - A ridiculous flag that is trying to narrowly
+ // target a fix for issue 650903.
+ // StackTraceTicket - ticket ensuring that we have permission to call this.
+ void GetStackInfo(
+ StackTraceTicket ticket,
+ Thread *thread,
+ FramePointer targetFP,
+ CONTEXT *pContext,
+ bool suppressUMChainFromComPlusMethodFrameGeneric = false
+ );
+
+ //bool ControllerStackInfo::HasReturnFrame() Returns
+ // true if m_returnFrame is valid. Returns false
+ // if m_returnFrame is set to m_activeFrame
+ bool HasReturnFrame() {LIMITED_METHOD_CONTRACT; return m_returnFound; }
+
+ // This function "undoes" an unwind, i.e. it takes the active frame (the current frame)
+ // and sets it to be the return frame (the caller frame). Currently it is only used by
+ // the stepper to step out of an LCG method. See DebuggerStepper::DetectHandleLCGMethods()
+ // for more information.
+ void SetReturnFrameWithActiveFrame();
+
+private:
+ // If we don't have a valid context, then use this temp cache.
+ CONTEXT m_tempContext;
+
+ bool m_activeFound;
+ bool m_returnFound;
+
+    // A ridiculous flag that is targeting a very narrow fix at issue 650903
+ // (4.5.1/Blue). This is set for the duration of a stackwalk designed to
+ // help us "Step Out" to a managed frame (i.e., managed-only debugging).
+ bool m_suppressUMChainFromComPlusMethodFrameGeneric;
+
+ // Track if this stackwalk actually happened.
+ // This is used by the StackTraceTicket(ControllerStackInfo * info) ticket.
+ INDEBUG(bool m_dbgExecuted);
+};
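+
+// Illustrative sketch (not part of the original sources): how a controller
+// typically drives the two-frame stack walk described above. The "patch" and
+// "thread" variables are assumed to come from the caller's context.
+//
+//   ControllerStackInfo csi;
+//   StackTraceTicket ticket(patch);              // rule: safe at this kind of patch
+//   csi.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);
+//
+//   FrameInfo &active = csi.m_activeFrame;       // always valid after GetStackInfo
+//   if (csi.HasReturnFrame())
+//   {
+//       FrameInfo &caller = csi.m_returnFrame;   // the frame above the target
+//   }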
+
+#endif // !DACCESS_COMPILE
+
+
+/* ------------------------------------------------------------------------- *
+ * DebuggerController routines
+ * ------------------------------------------------------------------------- */
+
+// simple ref-counted buffer that's shared among DebuggerPatchSkippers for a
+// given DebuggerControllerPatch. upon creation the refcount will be 1. when
+// the last skipper and controller are cleaned up the buffer will be released.
+// note that there isn't a clear owner of this buffer since a controller can be
+// cleaned up while the final skipper is still in flight.
+class SharedPatchBypassBuffer
+{
+public:
+ SharedPatchBypassBuffer() : m_refCount(1)
+ {
+#ifdef _DEBUG
+ DWORD cbToProtect = MAX_INSTRUCTION_LENGTH;
+ _ASSERTE(DbgIsExecutable((BYTE*)PatchBypass, cbToProtect));
+#endif // _DEBUG
+
+ // sentinel value indicating uninitialized data
+ *(reinterpret_cast<DWORD*>(PatchBypass)) = SentinelValue;
+#ifdef _TARGET_AMD64_
+ *(reinterpret_cast<DWORD*>(BypassBuffer)) = SentinelValue;
+ RipTargetFixup = 0;
+ RipTargetFixupSize = 0;
+#endif
+ }
+
+ ~SharedPatchBypassBuffer()
+ {
+ // trap deletes that don't go through Release()
+ _ASSERTE(m_refCount == 0);
+ }
+
+ LONG AddRef()
+ {
+ InterlockedIncrement(&m_refCount);
+ _ASSERTE(m_refCount > 0);
+ return m_refCount;
+ }
+
+ LONG Release()
+ {
+ LONG result = InterlockedDecrement(&m_refCount);
+ _ASSERTE(m_refCount >= 0);
+
+ if (m_refCount == 0)
+ {
+ TRACE_FREE(this);
+ DeleteInteropSafeExecutable(this);
+ }
+
+ return result;
+ }
+
+ // "PatchBypass" must be the first field of this class for alignment to be correct.
+ BYTE PatchBypass[MAX_INSTRUCTION_LENGTH];
+#if defined(_TARGET_AMD64_)
+ const static int cbBufferBypass = 0x10;
+ BYTE BypassBuffer[cbBufferBypass];
+
+ UINT_PTR RipTargetFixup;
+ BYTE RipTargetFixupSize;
+#endif
+
+private:
+ const static DWORD SentinelValue = 0xffffffff;
+ LONG m_refCount;
+};
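+
+// Illustrative sketch (not part of the original sources): the intended
+// ref-count flow for the buffer above. The buffer is created with a count of
+// one on behalf of the patch; each skipper takes its own reference, and the
+// last Release() frees the interop-safe allocation.
+//
+//   SharedPatchBypassBuffer *pBuffer = patch->GetOrCreateSharedPatchBypassBuffer();
+//   pBuffer->AddRef();       // skipper keeps the buffer alive while in flight
+//   ...                      // single-step through the relocated instruction
+//   pBuffer->Release();      // skipper is done
+//   ...
+//   patch->DoCleanup();      // drops the patch's original reference; whichever
+//                            // Release() reaches zero deletes the buffer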
+
+// struct DebuggerFunctionKey: Provides a means of hashing unactivated
+// breakpoints; it's used mainly for the case where the function to put
+// the breakpoint in hasn't been JITted yet.
+// Module* module: Module that the method belongs to.
+// mdMethodDef md: meta data token for the method.
+struct DebuggerFunctionKey1
+{
+ PTR_Module module;
+ mdMethodDef md;
+};
+
+typedef DebuggerFunctionKey1 UNALIGNED DebuggerFunctionKey;
+
+// ILMaster: Breakpoints on IL code may need to be applied to multiple
+// copies of code, because generics mean code gets JITTed multiple times.
+// The "master" is a patch we keep to record the IL offset, and is used to
+// create new "slave" patches.
+
+//
+// ILSlave: The slaves created from ILMaster patches. The offset for
+// these is initially an IL offset and later becomes a native offset.
+//
+// NativeManaged: A patch we apply to managed code, usually for walkers etc.
+//
+// NativeUnmanaged: A patch applied to any kind of native code.
+
+enum DebuggerPatchKind { PATCH_KIND_IL_MASTER, PATCH_KIND_IL_SLAVE, PATCH_KIND_NATIVE_MANAGED, PATCH_KIND_NATIVE_UNMANAGED };
+
+// struct DebuggerControllerPatch: An entry in the patch (hash) table,
+// this should contain all the info that's needed over the course of a
+// patch's lifetime.
+//
+// FREEHASHENTRY entry: Three ULONGs, this is required
+// by the underlying hashtable implementation
+// DWORD opcode: A nonzero opcode && address field means that
+// the patch has been applied to something.
+// A patch with a zero'd opcode field means that the patch isn't
+// actually tracking a valid break opcode. See DebuggerPatchTable
+// for more details.
+// DebuggerController *controller: The controller that put this
+// patch here.
+// BOOL fSaveOpcode: If true, then unapply patch will save
+// a copy of the opcode in opcodeSaved, and apply patch will
+// copy opcodeSaved to opcode rather than grabbing the opcode
+// from the instruction. This is useful mainly when the JIT
+// has moved code, and we don't want to erroneously pick up the
+// user break instruction.
+// Full story:
+// FJIT moves the code. Once that's done, it calls Debugger->MoveCode(MethodDesc
+// *) to let us know the code moved. At that point, unbind all the breakpoints
+// in the method. Then we whip over all the patches, and re-bind all the
+// patches in the method. However, we can't guarantee that the code will exist
+// in both the old & new locations exclusively of each other (the method could
+// be 0xFF bytes big, and get moved 0x10 bytes in one direction), so instead of
+// simply re-using the unbind/rebind logic as it is, we need a special case
+// wherein the old method isn't valid. Instead, we'll copy opcode into
+// opcodeSaved, and then zero out opcode (we need to zero out opcode since that
+// tells us that the patch is invalid, if the right side sees it). Thus the run-
+// around.
+// DebuggerPatchKind: see above
+// DWORD opcodeSaved: Contains an opcode if fSaveOpcode == true
+// SIZE_T nVersion: If the patch is stored by IL offset, then we
+// must also store the version ID so that we know which version
+// this is supposed to be applied to. Note that this will only
+// be set for DebuggerBreakpoints & DebuggerEnCBreakpoints. For
+// others, it should be set to DMI_VERSION_INVALID. For constants,
+// see DebuggerJitInfo
+// DebuggerJitInfo dji: A pointer to the debuggerJitInfo that describes
+// the method (and version) that this patch is applied to. This field may
+// also have the value DebuggerJitInfo::DMI_VERSION_INVALID
+
+// SIZE_T pid: Within a given patch table, all patches have a
+// semi-unique ID. There should be one and only 1 patch for a given
+//      semi-unique ID. There should be one and only one patch for a given
+// patches from multiple, previous versions.
+// AppDomain * pAppDomain: Either NULL (patch applies to all appdomains
+// that the debugger is attached to)
+// or contains a pointer to an AppDomain object (patch applies only to
+// that A.D.)
+
+// NOTE: due to unkind abuse of type system you cannot add ctor/dtor to this
+// type and expect them to be automatically invoked!
+struct DebuggerControllerPatch
+{
+ friend class DebuggerPatchTable;
+ friend class DebuggerController;
+
+ FREEHASHENTRY entry;
+ DebuggerController *controller;
+ DebuggerFunctionKey key;
+ SIZE_T offset;
+ PTR_CORDB_ADDRESS_TYPE address;
+ FramePointer fp;
+ PRD_TYPE opcode; //this name will probably change because it is a misnomer
+ BOOL fSaveOpcode;
+ PRD_TYPE opcodeSaved;//also a misnomer
+ BOOL offsetIsIL;
+ TraceDestination trace;
+private:
+ int refCount;
+ union
+ {
+ SIZE_T encVersion; // used for Master patches, to record which EnC version this Master applies to
+ DebuggerJitInfo *dji; // used for Slave and native patches, though only when tracking JIT Info
+ };
+
+#ifndef _TARGET_ARM_
+ // this is shared among all the skippers for this controller. see the comments
+ // right before the definition of SharedPatchBypassBuffer for lifetime info.
+ SharedPatchBypassBuffer* m_pSharedPatchBypassBuffer;
+#endif // _TARGET_ARM_
+
+public:
+ SIZE_T pid;
+ AppDomain *pAppDomain;
+
+ BOOL IsNativePatch();
+ BOOL IsManagedPatch();
+ BOOL IsILMasterPatch();
+ BOOL IsILSlavePatch();
+ DebuggerPatchKind GetKind();
+
+ // A patch has DJI if it was created with it or if it has been mapped to a
+ // function that has been jitted while JIT tracking was on. It does not
+ // necessarily mean the patch is bound. ILMaster patches never have DJIs.
+ // Patches will never have DJIs if we are not tracking JIT information.
+ //
+ // Patches can also be unbound, e.g. in UnbindFunctionPatches. Any DJI gets cleared
+ // when the patch is unbound. This appears to be used as an indicator
+ // to Debugger::MapAndBindFunctionPatches to make sure that
+ // we don't skip the patch when we get new code.
+ BOOL HasDJI()
+ {
+ return (!IsILMasterPatch() && dji != NULL);
+ }
+
+ DebuggerJitInfo *GetDJI()
+ {
+ _ASSERTE(!IsILMasterPatch());
+ return dji;
+ }
+
+ // These tell us which EnC version a patch relates to. They are used
+ // to determine if we are mapping a patch to a new version.
+ //
+ BOOL HasEnCVersion()
+ {
+ return (IsILMasterPatch() || HasDJI());
+ }
+
+ SIZE_T GetEnCVersion()
+ {
+ _ASSERTE(HasEnCVersion());
+ return (IsILMasterPatch() ? encVersion : (HasDJI() ? GetDJI()->m_encVersion : CorDB_DEFAULT_ENC_FUNCTION_VERSION));
+ }
+
+ // We set the DJI explicitly after mapping a patch
+ // to freshly jitted code or to a new version. The Unbind/Bind/MovedCode mess
+ // for the FJIT will also set the DJI to NULL as an indicator that Debugger::MapAndBindFunctionPatches
+ // should not skip the patch.
+ void SetDJI(DebuggerJitInfo *newDJI)
+ {
+ _ASSERTE(!IsILMasterPatch());
+ dji = newDJI;
+ }
+
+ // A patch is bound if we've mapped it to a real honest-to-goodness
+ // native address.
+ // Note that we currently activate all patches immediately after binding them, and
+ // delete all patches after unactivating them. This means that the window where
+ // a patch is bound but not active is very small (and should always be protected by
+ // a lock). We rely on this correlation in a few places, and ASSERT it explicitly there.
+ BOOL IsBound()
+ {
+ if( address == NULL ) {
+ // patch is unbound, cannot be active
+ _ASSERTE( PRDIsEmpty(opcode) );
+ return FALSE;
+ }
+
+ // IL Master patches are never bound.
+ _ASSERTE( !IsILMasterPatch() );
+
+ return TRUE;
+ }
+
+ // It would be nice if we never needed IsBreakpointPatch or IsStepperPatch,
+ // but a few bits of the existing code look at which controller type is involved.
+ BOOL IsBreakpointPatch();
+ BOOL IsStepperPatch();
+
+ bool IsActivated()
+ {
+        // A patch is active if we've stored a non-zero opcode.
+ // Note: this might be a problem as opcode 0 may be a valid opcode (see issue 366221).
+ if( PRDIsEmpty(opcode) ) {
+ return FALSE;
+ }
+
+ // Patch is active, so it must also be bound
+ _ASSERTE( address != NULL );
+ return TRUE;
+ }
+
+ bool IsFree() {return (refCount == 0);}
+ bool IsTriggering() {return (refCount > 1);}
+
+ // Is this patch at a position at which it's safe to take a stack?
+ bool IsSafeForStackTrace();
+
+#ifndef _TARGET_ARM_
+ // gets a pointer to the shared buffer
+ SharedPatchBypassBuffer* GetOrCreateSharedPatchBypassBuffer();
+
+ // entry point for general initialization when the controller is being created
+ void Initialize()
+ {
+ m_pSharedPatchBypassBuffer = NULL;
+ }
+
+ // entry point for general cleanup when the controller is being removed from the patch table
+ void DoCleanup()
+ {
+ if (m_pSharedPatchBypassBuffer != NULL)
+ m_pSharedPatchBypassBuffer->Release();
+ }
+#endif // _TARGET_ARM_
+
+private:
+ DebuggerPatchKind kind;
+};
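+
+// Illustrative sketch (not part of the original sources): the state invariants
+// that the accessors above encode, written out as assertions for clarity.
+// CheckPatchInvariants is a hypothetical helper, not part of the codebase.
+//
+//   void CheckPatchInvariants(DebuggerControllerPatch *p)
+//   {
+//       if (p->IsActivated())
+//           _ASSERTE(p->IsBound());        // an active patch is always bound
+//       if (!p->IsBound())
+//           _ASSERTE(!p->IsActivated());   // an unbound patch is never active
+//       if (p->IsILMasterPatch())
+//           _ASSERTE(!p->IsBound());       // IL master patches are never bound
+//   }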
+
+typedef DPTR(DebuggerControllerPatch) PTR_DebuggerControllerPatch;
+
+/* class DebuggerPatchTable: This is the table that contains
+ * information about the patches (breakpoints) maintained by the
+ * debugger for a variety of purposes.
+ * The only tricky part is that
+ * patches can be hashed either by the address that they're applied to,
+ * or by DebuggerFunctionKey. If address is equal to zero, then the
+ * patch is hashed by DebuggerFunctionKey.
+ *
+ * Patch table inspection scheme:
+ *
+ * We have to be able to inspect memory (read/write) from the right
+ * side w/o the help of the left side. When we do unmanaged debugging,
+ * we need to be able to R/W memory out of a debuggee s.t. the debugger
+ * won't see our patches. So we have to be able to read our patch table
+ * from the left side, which is problematic since we know that the left
+ * side will be arbitrarily frozen, but we don't know where.
+ *
+ * So our scheme is this:
+ * we'll send a pointer to the g_patches table over in startup,
+ * and when we want to inspect it at runtime, we'll freeze the left side,
+ * then read-memory the "data" (m_pcEntries) array over to the right. We'll
+ * iterate through the array & assume that anything with a non-zero opcode
+ * and address field is valid. To ensure that the assumption is ok, we
+ * use the zeroing allocator which zeros out newly created space, and
+ * we'll be very careful about zeroing out the opcode field during the
+ * Unapply operation
+ *
+ * NOTE: Don't mess with the memory protections on this while the
+ * left side is frozen (ie, no threads are executing).
+ * WriteMemory depends on being able to write the patchtable back
+ * if it was read successfully.
+ */
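+
+// Illustrative sketch (not part of the original sources): the right-side scan
+// described above, once the m_pcEntries array has been copied out of the
+// frozen left side. "pLocalCopy" and "cCopiedEntries" are assumptions used
+// only for illustration.
+//
+//   for (ULONG i = 0; i < cCopiedEntries; i++)
+//   {
+//       DebuggerControllerPatch *p = &pLocalCopy[i];
+//       if (p->address != NULL && !PRDIsEmpty(p->opcode))
+//       {
+//           // a live, applied patch: remember (address, opcode) so that
+//           // ReadMemory can hand back the original opcode instead of the patch
+//       }
+//   }
+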
+#define DPT_INVALID_SLOT (UINT32_MAX)
+#define DPT_DEFAULT_TRACE_TYPE TRACE_OTHER
+
+/* Although CHashTableAndData can grow, we always use a fixed number of buckets.
+ * This is problematic for tables like the patch table which are usually small, but
+ * can become huge. When the number of entries far exceeds the number of buckets,
+ * lookup and addition basically degrade into linear searches. There is a trade-off
+ * here between wasting memory for unused buckets, and performance of large tables.
+ * Also note that the number of buckets should be a prime number.
+*/
+#define DPT_HASH_BUCKETS 1103
+
+class DebuggerPatchTable : private CHashTableAndData<CNewZeroData>
+{
+ VPTR_BASE_CONCRETE_VTABLE_CLASS(DebuggerPatchTable);
+
+ friend class DebuggerRCThread;
+private:
+ //incremented so that we can get DPT-wide unique PIDs.
+ // pid = Patch ID.
+ SIZE_T m_pid;
+ // Given a patch, retrieves the correct key. The return value of this function is passed to Cmp(), Find(), etc.
+ SIZE_T Key(DebuggerControllerPatch *patch)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Most clients of CHashTable pass a host pointer as the key. However, the key really could be
+ // anything. In our case, the key can either be a host pointer of type DebuggerFunctionKey or
+ // the address of the patch.
+ if (patch->address == NULL)
+ {
+ return (SIZE_T)(&patch->key);
+ }
+ else
+ {
+ return (SIZE_T)(dac_cast<TADDR>(patch->address));
+ }
+ }
+
+ // Given two DebuggerControllerPatches, tells
+ // whether they are equal or not. Does this by comparing the correct
+ // key.
+    // SIZE_T k1: If pc2 is hashed by address,
+    //      k1 is an address. If
+    //      pc2 is hashed by DebuggerFunctionKey,
+    //      k1 is a pointer to a DebuggerFunctionKey.
+    // Returns FALSE (0) if the two patches match, nonzero otherwise.
+ BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ DebuggerControllerPatch * pPatch2 = dac_cast<PTR_DebuggerControllerPatch>(const_cast<HASHENTRY *>(pc2));
+
+ if (pPatch2->address == NULL)
+ {
+ // k1 is a host pointer of type DebuggerFunctionKey.
+ DebuggerFunctionKey * pKey1 = reinterpret_cast<DebuggerFunctionKey *>(k1);
+
+ return ((pKey1->module != pPatch2->key.module) || (pKey1->md != pPatch2->key.md));
+ }
+ else
+ {
+ return ((SIZE_T)(dac_cast<TADDR>(pPatch2->address)) != k1);
+ }
+ }
+
+ //Computes a hash value based on an address
+ ULONG HashAddress(PTR_CORDB_ADDRESS_TYPE address)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (ULONG)(SIZE_T)(dac_cast<TADDR>(address));
+ }
+
+ //Computes a hash value based on a DebuggerFunctionKey
+ ULONG HashKey(DebuggerFunctionKey * pKey)
+ {
+ SUPPORTS_DAC;
+ return HashPtr(pKey->md, pKey->module);
+ }
+
+ //Computes a hash value from a patch, using the address field
+ // if the patch is hashed by address, using the DebuggerFunctionKey
+ // otherwise
+ ULONG Hash(DebuggerControllerPatch * pPatch)
+ {
+ SUPPORTS_DAC;
+
+ if (pPatch->address == NULL)
+ return HashKey(&(pPatch->key));
+ else
+ return HashAddress(pPatch->address);
+ }
+ //Public Members
+public:
+ enum {
+ DCP_PID_INVALID,
+ DCP_PID_FIRST_VALID,
+ };
+
+#ifndef DACCESS_COMPILE
+
+ DebuggerPatchTable() : CHashTableAndData<CNewZeroData>(DPT_HASH_BUCKETS) { }
+
+ HRESULT Init()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ m_pid = DCP_PID_FIRST_VALID;
+
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ return NewInit(17, sizeof(DebuggerControllerPatch), 101);
+ }
+
+ // Assuming that the chain of patches (as defined by all the
+ // GetNextPatch from this patch) are either sorted or NULL, take the given
+ // patch (which should be the first patch in the chain). This
+ // is called by AddPatch to make sure that the order of the
+ // patches is what we want for things like E&C, DePatchSkips,etc.
+ void SortPatchIntoPatchList(DebuggerControllerPatch **ppPatch);
+
+ void SpliceOutOfList(DebuggerControllerPatch *patch);
+
+ void SpliceInBackOf(DebuggerControllerPatch *patchAppend,
+ DebuggerControllerPatch *patchEnd);
+
+ //
+ // Note that patches may be reallocated - do not keep a pointer to a patch.
+ //
+ DebuggerControllerPatch *AddPatchForMethodDef(DebuggerController *controller,
+ Module *module,
+ mdMethodDef md,
+ size_t offset,
+ DebuggerPatchKind kind,
+ FramePointer fp,
+ AppDomain *pAppDomain,
+ SIZE_T masterEnCVersion,
+ DebuggerJitInfo *dji);
+
+ DebuggerControllerPatch *AddPatchForAddress(DebuggerController *controller,
+ MethodDesc *fd,
+ size_t offset,
+ DebuggerPatchKind kind,
+ CORDB_ADDRESS_TYPE *address,
+ FramePointer fp,
+ AppDomain *pAppDomain,
+ DebuggerJitInfo *dji = NULL,
+ SIZE_T pid = DCP_PID_INVALID,
+ TraceType traceType = DPT_DEFAULT_TRACE_TYPE);
+
+ // Set the native address for this patch.
+ void BindPatch(DebuggerControllerPatch *patch, CORDB_ADDRESS_TYPE *address);
+ void UnbindPatch(DebuggerControllerPatch *patch);
+ void RemovePatch(DebuggerControllerPatch *patch);
+
+ // This is a sad legacy workaround. The patch table (implemented as this
+    // class) is shared across the process boundary. We publish the runtime offsets of
+ // some key fields. Since those fields are private, we have to provide
+ // accessors here. So if you're not using these functions, don't start.
+ // We can hopefully remove them.
+ static SIZE_T GetOffsetOfEntries()
+ {
+        // Assert that the offsets of the fields in the base class are
+        // the same as the offsets of those fields in this class.
+ _ASSERTE((void*)(DebuggerPatchTable*)NULL == (void*)(CHashTableAndData<CNewZeroData>*)NULL);
+ return helper_GetOffsetOfEntries();
+ }
+
+ static SIZE_T GetOffsetOfCount()
+ {
+ _ASSERTE((void*)(DebuggerPatchTable*)NULL == (void*)(CHashTableAndData<CNewZeroData>*)NULL);
+ return helper_GetOffsetOfCount();
+ }
+
+    // GetPatch finds the first patch in the hash table
+ // that is hashed by matching the {Module,mdMethodDef} to the
+ // patch's DebuggerFunctionKey. This will NOT find anything
+ // hashed by address, even if that address is within the
+ // method specified.
+ // You can use GetNextPatch to iterate through all the patches keyed by
+ // this Module,mdMethodDef pair
+ DebuggerControllerPatch *GetPatch(Module *module, mdToken md)
+ {
+ DebuggerFunctionKey key;
+
+ key.module = module;
+ key.md = md;
+
+ return reinterpret_cast<DebuggerControllerPatch *>(Find(HashKey(&key), (SIZE_T)&key));
+ }
+#endif // #ifndef DACCESS_COMPILE
+
+    // GetPatch will find the first patch in the hash
+ // table that is hashed by address. It will NOT find anything hashed
+ // by {Module,mdMethodDef}, or by MethodDesc.
+ DebuggerControllerPatch * GetPatch(PTR_CORDB_ADDRESS_TYPE address)
+ {
+ SUPPORTS_DAC;
+ ARM_ONLY(_ASSERTE(dac_cast<DWORD>(address) & THUMB_CODE));
+
+ DebuggerControllerPatch * pPatch =
+ dac_cast<PTR_DebuggerControllerPatch>(Find(HashAddress(address), (SIZE_T)(dac_cast<TADDR>(address))));
+
+ return pPatch;
+ }
+
+ DebuggerControllerPatch *GetNextPatch(DebuggerControllerPatch *prev);
+
+ // Find the first patch in the patch table, and store
+ // index info in info. Along with GetNextPatch, this can
+ // iterate through the whole patch table. Note that since the
+ // hashtable operates via iterating through all the contents
+ // of all the buckets, if you add an entry while iterating
+ // through the table, you may or may not iterate across
+ // the new entries. You will iterate through all the entries
+ // that were present at the beginning of the run. You
+    // can safely delete anything you've already iterated past; deleting anything
+    // else is kinda risky.
+ DebuggerControllerPatch * GetFirstPatch(HASHFIND * pInfo)
+ {
+ SUPPORTS_DAC;
+
+ return dac_cast<PTR_DebuggerControllerPatch>(FindFirstEntry(pInfo));
+ }
+
+ // Along with GetFirstPatch, this can iterate through
+ // the whole patch table. See GetFirstPatch for more info
+ // on the rules of iterating through the table.
+ DebuggerControllerPatch * GetNextPatch(HASHFIND * pInfo)
+ {
+ SUPPORTS_DAC;
+
+ return dac_cast<PTR_DebuggerControllerPatch>(FindNextEntry(pInfo));
+ }
+
+ // Used by DebuggerController to translate an index
+ // of a patch into a direct pointer.
+ inline HASHENTRY * GetEntryPtr(ULONG iEntry)
+ {
+ SUPPORTS_DAC;
+
+ return EntryPtr(iEntry);
+ }
+
+    // Used by DebuggerController to grab indices of patches
+ // rather than holding direct pointers to them.
+ inline ULONG GetItemIndex(HASHENTRY * p)
+ {
+ SUPPORTS_DAC;
+
+ return ItemIndex(p);
+ }
+
+#ifdef _DEBUG_PATCH_TABLE
+public:
+ // DEBUG An internal debugging routine, it iterates
+ // through the hashtable, stopping at every
+    // single entry, no matter what its state. For this to
+ // compile, you're going to have to add friend status
+ // of this class to CHashTableAndData in
+ // to $\Com99\Src\inc\UtilCode.h
+ void CheckPatchTable();
+#endif // _DEBUG_PATCH_TABLE
+
+ // Count how many patches are in the table.
+ // Use for asserts
+ int GetNumberOfPatches();
+
+};
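+
+// Illustrative sketch (not part of the original sources): the two lookup paths
+// the dual hashing supports. An unbound patch is keyed by {Module, mdMethodDef};
+// once bound, it is keyed by its native address. "pModule", "methodToken", and
+// "patchedAddress" are assumed from the caller's context.
+//
+//   DebuggerPatchTable *pTable = DebuggerController::GetPatchTable();
+//
+//   // unbound (or not-yet-jitted) case: look up by function key
+//   DebuggerControllerPatch *p1 = pTable->GetPatch(pModule, methodToken);
+//
+//   // bound case: look up by the patched native address
+//   DebuggerControllerPatch *p2 = pTable->GetPatch(patchedAddress);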
+
+typedef VPTR(class DebuggerPatchTable) PTR_DebuggerPatchTable;
+
+
+#if !defined(DACCESS_COMPILE)
+
+// DebuggerControllerPage|Will eventually be used for
+// 'break when modified' behaviour
+typedef struct DebuggerControllerPage
+{
+ DebuggerControllerPage *next;
+ const BYTE *start, *end;
+ DebuggerController *controller;
+ bool readable;
+} DebuggerControllerPage;
+
+// DEBUGGER_CONTROLLER_TYPE: Identifies the type of the controller.
+// It exists b/c we have RTTI turned off.
+// Note that the order of these is important - SortPatchIntoPatchList
+// relies on this ordering.
+//
+// DEBUGGER_CONTROLLER_STATIC|Base class response. Should never be
+// seen, since we shouldn't be asking the base class about this.
+// DEBUGGER_CONTROLLER_BREAKPOINT|DebuggerBreakpoint
+// DEBUGGER_CONTROLLER_STEPPER|DebuggerStepper
+// DEBUGGER_CONTROLLER_THREAD_STARTER|DebuggerThreadStarter
+// DEBUGGER_CONTROLLER_ENC|DebuggerEnCBreakpoint
+// DEBUGGER_CONTROLLER_PATCH_SKIP|DebuggerPatchSkip
+// DEBUGGER_CONTROLLER_JMC_STEPPER|DebuggerJMCStepper - steps through Just-My-Code
+// DEBUGGER_CONTROLLER_CONTINUABLE_EXCEPTION|DebuggerContinuableExceptionBreakpoint
+enum DEBUGGER_CONTROLLER_TYPE
+{
+ DEBUGGER_CONTROLLER_THREAD_STARTER,
+ DEBUGGER_CONTROLLER_ENC,
+ DEBUGGER_CONTROLLER_ENC_PATCH_TO_SKIP, // At any one address,
+ // There can be only one!
+ DEBUGGER_CONTROLLER_PATCH_SKIP,
+ DEBUGGER_CONTROLLER_BREAKPOINT,
+ DEBUGGER_CONTROLLER_STEPPER,
+ DEBUGGER_CONTROLLER_FUNC_EVAL_COMPLETE,
+ DEBUGGER_CONTROLLER_USER_BREAKPOINT, // UserBreakpoints are used by Runtime threads to
+ // send that they've hit a user breakpoint to the Right Side.
+ DEBUGGER_CONTROLLER_JMC_STEPPER, // Stepper that only stops in JMC-functions.
+ DEBUGGER_CONTROLLER_CONTINUABLE_EXCEPTION,
+ DEBUGGER_CONTROLLER_STATIC,
+};
+
+enum TP_RESULT
+{
+ TPR_TRIGGER, // This controller wants to SendEvent
+ TPR_IGNORE, // This controller doesn't want to SendEvent
+ TPR_TRIGGER_ONLY_THIS, // This, and only this controller, should be triggered.
+ // Right now, only the DebuggerEnCRemap controller
+ // returns this, the remap patch should be the first
+ // patch in the list.
+ TPR_TRIGGER_ONLY_THIS_AND_LOOP,
+ // This, and only this controller, should be triggered.
+ // Right now, only the DebuggerEnCRemap controller
+ // returns this, the remap patch should be the first
+ // patch in the list.
+ // After triggering this, DPOSS should skip the
+ // ActivatePatchSkip call, so we hit the other
+ // breakpoints at this location.
+ TPR_IGNORE_AND_STOP, // Don't SendEvent, and stop asking other
+ // controllers if they want to.
+ // Service any previous triggered controllers.
+};
+
+enum SCAN_TRIGGER
+{
+ ST_PATCH = 0x1, // Only look for patches
+ ST_SINGLE_STEP = 0x2, // Look for patches, and single-steps.
+} ;
+
+enum TRIGGER_WHY
+{
+ TY_NORMAL = 0x0,
+ TY_SHORT_CIRCUIT= 0x1, // EnC short circuit - see DispatchPatchOrSingleStep
+} ;
+
+// the return value for DebuggerController::DispatchPatchOrSingleStep
+enum DPOSS_ACTION
+{
+ // the following enum has been carefully ordered to optimize the helper
+ // functions below. Do not re-order them w/o changing the helper funcs.
+ DPOSS_INVALID = 0x0, // invalid action value
+ DPOSS_DONT_CARE = 0x1, // don't care about this exception
+ DPOSS_USED_WITH_NO_EVENT = 0x2, // Care about this exception but won't send event to RS
+ DPOSS_USED_WITH_EVENT = 0x3, // Care about this exception and will send event to RS
+};
+
+// helper function
+inline bool IsInUsedAction(DPOSS_ACTION action)
+{
+ _ASSERTE(action != DPOSS_INVALID);
+ return (action >= DPOSS_USED_WITH_NO_EVENT);
+}
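+
+// Illustrative sketch (not part of the original sources): because the enum is
+// ordered INVALID < DONT_CARE < USED_WITH_NO_EVENT < USED_WITH_EVENT, a caller
+// can classify the dispatch result with a single compare. "thread", "context",
+// and "ip" are assumed from the caller's context.
+//
+//   DPOSS_ACTION action = DebuggerController::DispatchPatchOrSingleStep(thread, context, ip, ST_PATCH);
+//   if (IsInUsedAction(action))
+//   {
+//       // the debugger consumed this exception, with or without sending an event
+//   }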
+
+inline void VerifyExecutableAddress(const BYTE* address)
+{
+// TODO: when can we apply this to x86?
+#if defined(_WIN64)
+#if defined(_DEBUG)
+ MEMORY_BASIC_INFORMATION mbi;
+
+ if (sizeof(mbi) == ClrVirtualQuery(address, &mbi, sizeof(mbi)))
+ {
+ if (!(mbi.State & MEM_COMMIT))
+ {
+ STRESS_LOG1(LF_GCROOTS, LL_ERROR, "VerifyExecutableAddress: address is uncommited memory, address=0x%p", address);
+ CONSISTENCY_CHECK_MSGF((mbi.State & MEM_COMMIT), ("VEA: address (0x%p) is uncommited memory.", address));
+ }
+
+ if (!(mbi.Protect & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY)))
+ {
+ STRESS_LOG1(LF_GCROOTS, LL_ERROR, "VerifyExecutableAddress: address is not executable, address=0x%p", address);
+ CONSISTENCY_CHECK_MSGF((mbi.Protect & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY)),
+ ("VEA: address (0x%p) is not on an executable page.", address));
+ }
+ }
+#endif // _DEBUG
+#endif // _WIN64
+}
+
+#endif // !DACCESS_COMPILE
+
+
+// DebuggerController: DebuggerController serves
+// both as a static class that dispatches exceptions coming from the
+// EE, and as an abstract base class for the classes that derive
+// from it.
+class DebuggerController
+{
+ VPTR_BASE_CONCRETE_VTABLE_CLASS(DebuggerController);
+
+#if !defined(DACCESS_COMPILE)
+
+ // Needs friendship for lock because of EnC locking workarounds.
+ friend class DebuggerEnCBreakpoint;
+
+ friend class DebuggerPatchSkip;
+ friend class DebuggerRCThread; //so we can get offsets of fields the
+ //right side needs to read
+ friend class Debugger; // So Debugger can lock, use, unlock the patch
+ // table in MapAndBindFunctionBreakpoints
+ friend void Debugger::UnloadModule(Module* pRuntimeModule, AppDomain *pAppDomain);
+
+ //
+ // Static functionality
+ //
+
+ public:
+ // Once we support debugging + fibermode (which was cut in V2.0), we may need some Thread::BeginThreadAffinity() calls
+ // associated with the controller lock because this lock wraps context operations.
+ class ControllerLockHolder : public CrstHolder
+ {
+ public:
+ ControllerLockHolder() : CrstHolder(&g_criticalSection) { WRAPPER_NO_CONTRACT; }
+ };
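+
+    // Illustrative sketch (not part of the original sources): the holder is
+    // used RAII-style; the controller lock is taken in the constructor and
+    // released when the holder goes out of scope, e.g.:
+    //
+    //   {
+    //       DebuggerController::ControllerLockHolder lockController;
+    //       // ... inspect or mutate the patch table ...
+    //   }   // lock released here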
+
+ static HRESULT Initialize();
+
+ // Remove and cleanup all DebuggerControllers for detach
+ static void DeleteAllControllers();
+
+ //
+ // global event dispatching functionality
+ //
+
+
+    // Controllers are notified when they enter/exit func-evals (on their own thread,
+    // or on any thread if the controller doesn't have a thread).
+ // The original use for this was to allow steppers to skip through func-evals.
+ // thread - the thread doing the funceval.
+ static void DispatchFuncEvalEnter(Thread * thread);
+ static void DispatchFuncEvalExit(Thread * thread);
+
+ static bool DispatchNativeException(EXCEPTION_RECORD *exception,
+ CONTEXT *context,
+ DWORD code,
+ Thread *thread);
+
+ static bool DispatchUnwind(Thread *thread,
+ MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset,
+ FramePointer handlerFP,
+ CorDebugStepReason unwindReason);
+
+ static bool DispatchTraceCall(Thread *thread,
+ const BYTE *address);
+
+ static PRD_TYPE GetPatchedOpcode(CORDB_ADDRESS_TYPE *address);
+
+ static BOOL CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address, /*OUT*/ PRD_TYPE *pOpcode);
+
+ // pIP is the ip right after the prolog of the method we've entered.
+ // fp is the frame pointer for that method.
+ static void DispatchMethodEnter(void * pIP, FramePointer fp);
+
+
+ // Delete any patches that exist for a specific module and optionally a specific AppDomain.
+ // If pAppDomain is specified, then only patches tied to the specified AppDomain are
+ // removed. If pAppDomain is null, then all patches for the module are removed.
+ static void RemovePatchesFromModule( Module* pModule, AppDomain* pAppdomain );
+
+    // Check whether there are any patches in the patch table for the specified module.
+ static bool ModuleHasPatches( Module* pModule );
+
+#if EnC_SUPPORTED
+ static DebuggerControllerPatch *IsXXXPatched(const BYTE *eip,
+ DEBUGGER_CONTROLLER_TYPE dct);
+
+ static DebuggerControllerPatch *GetEnCPatch(const BYTE *address);
+#endif //EnC_SUPPORTED
+
+ static DPOSS_ACTION ScanForTriggers(CORDB_ADDRESS_TYPE *address,
+ Thread *thread,
+ CONTEXT *context,
+ DebuggerControllerQueue *pDcq,
+ SCAN_TRIGGER stWhat,
+ TP_RESULT *pTpr);
+
+
+ static DebuggerPatchSkip *ActivatePatchSkip(Thread *thread,
+ const BYTE *eip,
+ BOOL fForEnC);
+
+
+ static DPOSS_ACTION DispatchPatchOrSingleStep(Thread *thread,
+ CONTEXT *context,
+ CORDB_ADDRESS_TYPE *ip,
+ SCAN_TRIGGER which);
+
+
+ static int GetNumberOfPatches()
+ {
+ if (g_patches == NULL)
+ return 0;
+
+ return g_patches->GetNumberOfPatches();
+ }
+
+ static int GetTotalMethodEnter() {LIMITED_METHOD_CONTRACT; return g_cTotalMethodEnter; }
+
+#if defined(_DEBUG)
+ // Debug check that we only have 1 thread-starter per thread.
+ // Check this new one against all existing ones.
+ static void EnsureUniqueThreadStarter(DebuggerThreadStarter * pNew);
+#endif
+    // If we have a thread-starter on the given EE thread, make sure it's canceled.
+ // Thread-Starters normally delete themselves when they fire. But if the EE
+ // destroys the thread before it fires, then we'd still have an active DTS.
+ static void CancelOutstandingThreadStarter(Thread * pThread);
+
+ static void AddRef(DebuggerControllerPatch *patch);
+ static void Release(DebuggerControllerPatch *patch);
+
+ private:
+
+ static bool MatchPatch(Thread *thread, CONTEXT *context,
+ DebuggerControllerPatch *patch);
+
+ // Returns TRUE if we should continue to dispatch after this exception
+ // hook.
+ static BOOL DispatchExceptionHook(Thread *thread, CONTEXT *context,
+ EXCEPTION_RECORD *exception);
+
+protected:
+#ifdef _DEBUG
+ static bool HasLock()
+ {
+ return g_criticalSection.OwnedByCurrentThread() != 0;
+ }
+#endif
+
+#endif // !DACCESS_COMPILE
+
+private:
+ SPTR_DECL(DebuggerPatchTable, g_patches);
+ SVAL_DECL(BOOL, g_patchTableValid);
+
+#if !defined(DACCESS_COMPILE)
+
+private:
+ static DebuggerControllerPage *g_protections;
+ static DebuggerController *g_controllers;
+
+ // This is the "Controller" lock. It synchronizes the controller infrastructure.
+ // It is smaller than the debugger lock, but larger than the debugger-data lock.
+ // It needs to be taken in execution-control related callbacks; and will also call
+ // back into the EE when held (most notably for the stub-managers; but also for various
+ // query operations).
+ static CrstStatic g_criticalSection;
+
+ // Write is protected by both Debugger + Controller Lock
+ static int g_cTotalMethodEnter;
+
+ static bool BindPatch(DebuggerControllerPatch *patch,
+ MethodDesc *fd,
+ CORDB_ADDRESS_TYPE *startAddr);
+ static bool ApplyPatch(DebuggerControllerPatch *patch);
+ static bool UnapplyPatch(DebuggerControllerPatch *patch);
+ static void UnapplyPatchAt(DebuggerControllerPatch *patch, CORDB_ADDRESS_TYPE *address);
+ static bool IsPatched(CORDB_ADDRESS_TYPE *address, BOOL native);
+
+ static void ActivatePatch(DebuggerControllerPatch *patch);
+ static void DeactivatePatch(DebuggerControllerPatch *patch);
+
+ static void ApplyTraceFlag(Thread *thread);
+ static void UnapplyTraceFlag(Thread *thread);
+
+ public:
+ static const BYTE *g_pMSCorEEStart, *g_pMSCorEEEnd;
+
+ static const BYTE *GetILPrestubDestination(const BYTE *prestub);
+ static const BYTE *GetILFunctionCode(MethodDesc *fd);
+
+ //
+ // Non-static functionality
+ //
+
+ public:
+
+ DebuggerController(Thread * pThread, AppDomain * pAppDomain);
+ virtual ~DebuggerController();
+ void Delete();
+ bool IsDeleted() { return m_deleted; }
+
+#endif // !DACCESS_COMPILE
+
+
+ // Return the pointer g_patches.
+ // Access to patch table for the RC threads (EE,DI)
+ // Why: The right side needs to know the address of the patch
+ // table (which never changes after it gets created) so that ReadMemory,
+    // WriteMemory can work from out-of-process. This should only be used
+ // when the Runtime Controller is starting up, and not thereafter.
+ // How:return g_patches;
+public:
+ static DebuggerPatchTable * GetPatchTable() {LIMITED_METHOD_DAC_CONTRACT; return g_patches; }
+ static BOOL GetPatchTableValid() {LIMITED_METHOD_DAC_CONTRACT; return g_patchTableValid; }
+
+#if !defined(DACCESS_COMPILE)
+ static BOOL *GetPatchTableValidAddr() {LIMITED_METHOD_CONTRACT; return &g_patchTableValid; }
+
+ // Is there a patch at addr?
+ // We sometimes want to use this version of the method
+ // (as opposed to IsPatched) because there is
+ // a race condition wherein a patch can be added to the table, we can
+ // ask about it, and then we can actually apply the patch.
+ // How: If the patch table contains a patch at that address, there
+ // is.
+ static bool IsAddressPatched(CORDB_ADDRESS_TYPE *address)
+ {
+ return (g_patches->GetPatch(address) != NULL);
+ }
+
+ //
+ // Event setup
+ //
+
+ Thread *GetThread() { return m_thread; }
+
+ // This one should be made private
+ BOOL AddBindAndActivateILSlavePatch(DebuggerControllerPatch *master,
+ DebuggerJitInfo *dji);
+
+ BOOL AddILPatch(AppDomain * pAppDomain, Module *module,
+ mdMethodDef md,
+ SIZE_T encVersion, // what encVersion does this apply to?
+ SIZE_T offset);
+
+ // The next two are very similar. Both work on offsets,
+ // but one takes a "patch id". I don't think these are really needed: the
+ // controller itself can act as the id of the patch.
+ BOOL AddBindAndActivateNativeManagedPatch(
+ MethodDesc * fd,
+ DebuggerJitInfo *dji,
+ SIZE_T offset,
+ FramePointer fp,
+ AppDomain *pAppDomain);
+
+ // Add a patch at the start of a not-yet-jitted method.
+ void AddPatchToStartOfLatestMethod(MethodDesc * fd);
+
+
+ // This version is particularly useful b/c it doesn't assume that the
+ // patch is inside a managed method.
+ DebuggerControllerPatch *AddAndActivateNativePatchForAddress(CORDB_ADDRESS_TYPE *address,
+ FramePointer fp,
+ bool managed,
+ TraceType traceType);
+
+
+
+ bool PatchTrace(TraceDestination *trace, FramePointer fp, bool fStopInUnmanaged);
+
+ void AddProtection(const BYTE *start, const BYTE *end, bool readable);
+ void RemoveProtection(const BYTE *start, const BYTE *end, bool readable);
+
+ static BOOL IsSingleStepEnabled(Thread *pThread);
+ bool IsSingleStepEnabled();
+ void EnableSingleStep();
+ static void EnableSingleStep(Thread *pThread);
+
+ void DisableSingleStep();
+
+ void EnableExceptionHook();
+ void DisableExceptionHook();
+
+ void EnableUnwind(FramePointer frame);
+ void DisableUnwind();
+ FramePointer GetUnwind();
+
+ void EnableTraceCall(FramePointer fp);
+ void DisableTraceCall();
+
+ bool IsMethodEnterEnabled();
+ void EnableMethodEnter();
+ void DisableMethodEnter();
+
+ void DisableAll();
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_STATIC; }
+
+ // Return true iff this is one of the stepper types.
+ // if true, we can safely cast this controller to a DebuggerStepper*.
+ inline bool IsStepperDCType()
+ {
+ DEBUGGER_CONTROLLER_TYPE e = this->GetDCType();
+ return (e == DEBUGGER_CONTROLLER_STEPPER) || (e == DEBUGGER_CONTROLLER_JMC_STEPPER);
+ }
+
+ void Enqueue();
+ void Dequeue();
+
+ private:
+ // Helper function that is called on each virtual trace call target to set a trace patch
+ static void PatchTargetVisitor(TADDR pVirtualTraceCallTarget, VOID* pUserData);
+
+ DebuggerControllerPatch *AddILMasterPatch(Module *module,
+ mdMethodDef md,
+ SIZE_T offset,
+ SIZE_T encVersion);
+
+ BOOL AddBindAndActivatePatchForMethodDesc(MethodDesc *fd,
+ DebuggerJitInfo *dji,
+ SIZE_T offset,
+ DebuggerPatchKind kind,
+ FramePointer fp,
+ AppDomain *pAppDomain);
+
+
+ protected:
+
+ //
+ // Target event handlers
+ //
+
+
+ // Notify a controller that a func-eval is starting/ending on the given thread.
+ // If a controller's m_thread!=NULL, then it is only notified of func-evals on
+ // its thread.
+ // Controllers don't need to Enable anything to get this, and most controllers
+ // can ignore it.
+ virtual void TriggerFuncEvalEnter(Thread * thread);
+ virtual void TriggerFuncEvalExit(Thread * thread);
+
+ virtual TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+
+ // Dispatched when we get a SingleStep exception on this thread.
+ // Return true if we want SendEvent to get called.
+
+ virtual bool TriggerSingleStep(Thread *thread, const BYTE *ip);
+
+
+ // Dispatched to notify the controller when we are going to a filter/handler
+ // that's in the stepper's current frame or above (a caller frame).
+ // 'desc' & 'offset' are the location of the filter/handler (ie, this is where
+ // execution will continue)
+ // 'frame' points into the stack at the return address for the function w/ the handler.
+ // If (frame > m_unwindFP) then the filter/handler is in a caller, else
+ // it's in the same function as the current stepper (It's not in a child because
+ // we don't dispatch in that case).
+ virtual void TriggerUnwind(Thread *thread, MethodDesc *fd, DebuggerJitInfo * pDJI,
+ SIZE_T offset, FramePointer fp,
+ CorDebugStepReason unwindReason);
+
+ virtual void TriggerTraceCall(Thread *thread, const BYTE *ip);
+ virtual TP_RESULT TriggerExceptionHook(Thread *thread, CONTEXT * pContext,
+ EXCEPTION_RECORD *exception);
+
+ // Trigger when we've entered a method
+ // thread - current thread
+ // desc - the method that we've entered
+    // ip - the address after the prolog. A controller can patch this address
+    //      to stop in this method.
+    // Returns true if the trigger will disable itself from further method-entry
+    // triggers, else returns false (passing through a cctor can cause this).
+ // A controller can't block in this trigger! It can only update state / set patches
+ // and then return.
+ virtual void TriggerMethodEnter(Thread * thread,
+ DebuggerJitInfo *dji,
+ const BYTE * ip,
+ FramePointer fp);
+
+
+ // Send the managed debug event.
+ // This is called after TriggerPatch/TriggerSingleStep actually trigger.
+ // Note this can have a strange interaction with SetIp. Specifically this thread:
+ // 1) may call TriggerXYZ which queues the controller for send event.
+    //   2) blocks on the debugger lock (in which case SetIp may get invoked on it)
+ // 3) then sends the event
+ // If SetIp gets invoked at step 2, the thread's IP may have changed such that it should no
+ // longer trigger. Eg, perhaps we were about to send a breakpoint, and then SetIp moved us off
+ // the bp. So we pass in an extra flag, fInteruptedBySetIp, to let the controller decide how to handle this.
+ // Since SetIP only works within a single function, this can only be an issue if a thread's current stopping
+ // location and the patch it set are in the same function. (So this could happen for step-over, but never
+    // step-out).
+ // This flag will almost always be false.
+ //
+ // Once we actually send the event, we're under the debugger lock, and so the world is stable underneath us.
+ // But the world may change underneath a thread between when SendEvent gets queued and by the time it's actually called.
+ // So SendIPCEvent may need to do some last-minute sanity checking (like the SetIP case) to ensure it should
+ // still send.
+ //
+    // Returns true if we sent an event, false otherwise.
+ virtual bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
+
+ AppDomain *m_pAppDomain;
+
+ private:
+
+ Thread *m_thread;
+ DebuggerController *m_next;
+ bool m_singleStep;
+ bool m_exceptionHook;
+ bool m_traceCall;
+protected:
+ FramePointer m_traceCallFP;
+private:
+ FramePointer m_unwindFP;
+ int m_eventQueuedCount;
+ bool m_deleted;
+ bool m_fEnableMethodEnter;
+
+#endif // !DACCESS_COMPILE
+};
+
+
+#if !defined(DACCESS_COMPILE)
+
+/* ------------------------------------------------------------------------- *
+ * DebuggerPatchSkip routines
+ * ------------------------------------------------------------------------- */
+
+class DebuggerPatchSkip : public DebuggerController
+{
+ friend class DebuggerController;
+
+ DebuggerPatchSkip(Thread *thread,
+ DebuggerControllerPatch *patch,
+ AppDomain *pAppDomain);
+
+ ~DebuggerPatchSkip();
+
+ bool TriggerSingleStep(Thread *thread,
+ const BYTE *ip);
+
+ TP_RESULT TriggerExceptionHook(Thread *thread, CONTEXT * pContext,
+ EXCEPTION_RECORD *exception);
+
+ TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType(void)
+ { return DEBUGGER_CONTROLLER_PATCH_SKIP; }
+
+ void CopyInstructionBlock(BYTE *to, const BYTE* from);
+
+ void DecodeInstruction(CORDB_ADDRESS_TYPE *code);
+
+ CORDB_ADDRESS_TYPE *m_address;
+ int m_iOrigDisp; // the original displacement of a relative call or jump
+ InstructionAttribute m_instrAttrib; // info about the instruction being skipped over
+#ifndef _TARGET_ARM_
+ // this is shared among all the skippers and the controller. see the comments
+ // right before the definition of SharedPatchBypassBuffer for lifetime info.
+ SharedPatchBypassBuffer *m_pSharedPatchBypassBuffer;
+
+public:
+ CORDB_ADDRESS_TYPE *GetBypassAddress()
+ {
+ _ASSERTE(m_pSharedPatchBypassBuffer);
+ BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
+ return (CORDB_ADDRESS_TYPE *)patchBypass;
+ }
+#endif // _TARGET_ARM_
+};
+
+/* ------------------------------------------------------------------------- *
+ * DebuggerBreakpoint routines
+ * ------------------------------------------------------------------------- */
+
+// DebuggerBreakpoint:
+// Represents a user-placed breakpoint. When triggered, it always wants to be
+// activated, whereupon it informs the right side that the breakpoint was hit.
+class DebuggerBreakpoint : public DebuggerController
+{
+public:
+ DebuggerBreakpoint(Module *module,
+ mdMethodDef md,
+ AppDomain *pAppDomain,
+ SIZE_T m_offset,
+ bool m_native,
+ SIZE_T ilEnCVersion, // must give the EnC version for non-native bps
+ MethodDesc *nativeMethodDesc, // must be non-null when m_native, null otherwise
+ DebuggerJitInfo *nativeJITInfo, // optional when m_native, null otherwise
+ BOOL *pSucceed
+ );
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_BREAKPOINT; }
+
+private:
+
+ TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+ bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
+};
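+
+// Illustrative usage sketch (not part of the original source): placing an IL
+// breakpoint via the constructor above. Variable names and the offset/version
+// values are hypothetical, and plain new is shown only for brevity; per the
+// parameter comments, the native MethodDesc and DebuggerJitInfo are supplied
+// only for native breakpoints.
+//
+//   BOOL fSucceeded = FALSE;
+//   DebuggerBreakpoint *pBP = new DebuggerBreakpoint(pModule, tkMethodDef,
+//                                                    pAppDomain,
+//                                                    0x08,          // IL offset
+//                                                    false,         // not a native bp
+//                                                    encVersion,    // EnC version for IL bps
+//                                                    NULL, NULL,    // native-only parameters
+//                                                    &fSucceeded);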
+
+// * ------------------------------------------------------------------------ *
+// * DebuggerStepper routines
+// * ------------------------------------------------------------------------ *
+//
+
+// DebuggerStepper: This subclass of DebuggerController will
+// be instantiated to create a "Step" operation, meaning that execution
+// should continue until a range of IL code is exited.
+class DebuggerStepper : public DebuggerController
+{
+public:
+ DebuggerStepper(Thread *thread,
+ CorDebugUnmappedStop rgfMappingStop,
+ CorDebugIntercept interceptStop,
+ AppDomain *appDomain);
+ ~DebuggerStepper();
+
+ bool Step(FramePointer fp, bool in,
+ COR_DEBUG_STEP_RANGE *range, SIZE_T cRange, bool rangeIL);
+ void StepOut(FramePointer fp, StackTraceTicket ticket);
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_STEPPER; }
+
+ //MoveToCurrentVersion makes sure that the stepper is prepared to
+ // operate within the version of the code specified by djiNew.
+ // Currently, this means to map the ranges into the ranges of the djiNew.
+ // Idempotent.
+ void MoveToCurrentVersion( DebuggerJitInfo *djiNew);
+
+ // Public & Polymorphic on flavor (traditional vs. JMC).
+
+ // Regular steppers want to EnableTraceCall; and JMC-steppers want to EnableMethodEnter.
+ // (They're very related - they both stop at the next "interesting" managed code run).
+ // So we just gloss over the difference w/ some polymorphism.
+ virtual void EnablePolyTraceCall();
+
+protected:
+ // Steppers override these so that they can skip func-evals.
+ void TriggerFuncEvalEnter(Thread * thread);
+ void TriggerFuncEvalExit(Thread * thread);
+
+ bool TrapStepInto(ControllerStackInfo *info,
+ const BYTE *ip,
+ TraceDestination *pTD);
+
+ bool TrapStep(ControllerStackInfo *info, bool in);
+
+ // @todo - must remove that fForceTraditional flag. Need a way for a JMC stepper
+ // to do a Trad step out.
+ void TrapStepOut(ControllerStackInfo *info, bool fForceTraditional = false);
+
+ // Polymorphic on flavor (Traditional vs. Just-My-Code)
+ virtual void TrapStepNext(ControllerStackInfo *info);
+ virtual bool TrapStepInHelper(ControllerStackInfo * pInfo,
+ const BYTE * ipCallTarget,
+ const BYTE * ipNext,
+ bool fCallingIntoFunclet);
+ virtual bool IsInterestingFrame(FrameInfo * pFrame);
+ virtual bool DetectHandleNonUserCode(ControllerStackInfo *info, DebuggerMethodInfo * pInfo);
+
+
+ //DetectHandleInterceptors will figure out if the current
+ // frame is inside an interceptor, and if we're not interested in that
+ // interceptor, it will set a breakpoint outside it so that we can
+ // run to after the interceptor.
+ virtual bool DetectHandleInterceptors(ControllerStackInfo *info);
+
+ // This function checks whether the given IP is in an LCG method. If so, it enables
+ // JMC and does a step out. This effectively makes sure that we never stop in an LCG method.
+ BOOL DetectHandleLCGMethods(const PCODE ip, MethodDesc * pMD, ControllerStackInfo * pInfo);
+
+ bool IsAddrWithinFrame(DebuggerJitInfo *dji,
+ MethodDesc* pMD,
+ const BYTE* currentAddr,
+ const BYTE* targetAddr);
+
+ // x86 shouldn't need to call this method directly.
+ // We should call IsAddrWithinFrame() on x86 instead.
+ // That's why I use a name with the word "funclet" in it to scare people off.
+ bool IsAddrWithinMethodIncludingFunclet(DebuggerJitInfo *dji,
+ MethodDesc* pMD,
+ const BYTE* targetAddr);
+
+    //ShouldContinue returns false if the DebuggerStepper should stop
+    // execution and inform the right side. Returns true if the next
+    // breakpoint should be set, and execution allowed to continue.
+ bool ShouldContinueStep( ControllerStackInfo *info, SIZE_T nativeOffset );
+
+ //IsInRange returns true if the given IL offset is inside of
+ // any of the COR_DEBUG_STEP_RANGE structures given by range.
+ bool IsInRange(SIZE_T offset, COR_DEBUG_STEP_RANGE *range, SIZE_T rangeCount,
+ ControllerStackInfo *pInfo = NULL);
+ bool IsRangeAppropriate(ControllerStackInfo *info);
+
+
+
+ TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+ bool TriggerSingleStep(Thread *thread, const BYTE *ip);
+ void TriggerUnwind(Thread *thread, MethodDesc *fd, DebuggerJitInfo * pDJI,
+ SIZE_T offset, FramePointer fp,
+ CorDebugStepReason unwindReason);
+ void TriggerTraceCall(Thread *thread, const BYTE *ip);
+ bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
+
+
+ virtual void TriggerMethodEnter(Thread * thread, DebuggerJitInfo * dji, const BYTE * ip, FramePointer fp);
+
+
+ void ResetRange();
+
+ // Given a set of IL ranges, convert them to native and cache them.
+ bool SetRangesFromIL(DebuggerJitInfo * dji, COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount);
+
+ // Return true if this stepper is alive, but frozen. (we freeze when the stepper
+ // enters a nested func-eval).
+ bool IsFrozen();
+
+ // Returns true if this stepper is 'dead' - which happens if a non-frozen stepper
+ // gets a func-eval exit.
+ bool IsDead();
+
+ // Prepare for sending an event.
+ void PrepareForSendEvent(StackTraceTicket ticket);
+
+protected:
+ bool m_stepIn;
+ CorDebugStepReason m_reason; // Why did we stop?
+    FramePointer m_fpStepInto; // If we get a trace call callback, we may
+                               // end up completing a step into. If fp is
+                               // less than this when we stop, then we're
+                               // actually in a STEP_CALL.
+
+ CorDebugIntercept m_rgfInterceptStop; // If we hit a
+ // frame that's an interceptor (internal or otherwise), should we stop?
+
+ CorDebugUnmappedStop m_rgfMappingStop; // If we hit a frame
+    // that's at an interesting mapping point (prolog, epilog, etc.), should
+ // we stop?
+
+ COR_DEBUG_STEP_RANGE * m_range; // Ranges for active steppers are always in native offsets.
+
+ SIZE_T m_rangeCount;
+ SIZE_T m_realRangeCount; // @todo - delete b/c only used for CodePitching & Old-Enc
+
+ // The original step intention.
+ // As the stepper moves through code, it may change its other members.
+ // ranges may get deleted, m_stepIn may get toggled, etc.
+ // So we can't recover the original step direction from our other fields.
+ // We need to know the original direction (as well as m_fp) so we know
+ // if the frame we want to stop in is valid.
+ //
+ // Note that we can't really tell this by looking at our other state variables.
+ // For example, a single-instruction step looks like a step-over.
+ enum EStepMode
+ {
+ cStepOver, // Stop in level above or at m_fp.
+ cStepIn, // Stop in level above, below, or at m_fp.
+ cStepOut // Only stop in level above m_fp
+ } m_eMode;
+
+ // The frame that the stepper was originally created in.
+ // This is the only frame that the ranges are valid in.
+ FramePointer m_fp;
+
+#if defined(WIN64EXCEPTIONS)
+ // This frame pointer is used for funclet stepping.
+ // See IsRangeAppropriate() for more information.
+ FramePointer m_fpParentMethod;
+#endif // WIN64EXCEPTIONS
+
+ //m_fpException is 0 if we haven't stepped into an exception,
+ // and is ignored. If we get a TriggerUnwind while mid-step, we note
+ // the value of frame here, and use that to figure out if we should stop.
+ FramePointer m_fpException;
+ MethodDesc * m_fdException;
+
+ // Counter of FuncEvalEnter/Exits - used to determine if we're entering / exiting
+ // a func-eval.
+ int m_cFuncEvalNesting;
+
+ // To freeze a stepper, we disable all triggers. We have to remember that so that
+ // we can reenable them on Thaw.
+ DWORD m_bvFrozenTriggers;
+
+ // Values to use in m_bvFrozenTriggers.
+ enum ETriggers
+ {
+ kSingleStep = 0x1,
+ kMethodEnter = 0x2,
+ };
+
+
+ void EnableJMCBackStop(MethodDesc * pStartMethod);
+
+#ifdef _DEBUG
+ // MethodDesc that the Stepin started in.
+ // This is used for the JMC-backstop.
+ MethodDesc * m_StepInStartMethod;
+
+ // This flag is to ensure that PrepareForSendEvent is called before SendEvent.
+ bool m_fReadyToSend;
+#endif
+};
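+
+// Illustrative sketch (not part of the original source): issuing a step-over
+// request across an IL range with the API above, assuming a valid Thread*,
+// AppDomain* and leaf FramePointer are already in hand. Allocation/lifetime
+// management is omitted, the range values are hypothetical, and the
+// CorDebugUnmappedStop/CorDebugIntercept values are chosen only for
+// illustration.
+//
+//   COR_DEBUG_STEP_RANGE range = { 0x10, 0x24 };   // IL offsets to step across
+//   DebuggerStepper *pStepper = new DebuggerStepper(pThread,
+//                                                   STOP_OTHER_UNMAPPED,
+//                                                   INTERCEPT_NONE,
+//                                                   pAppDomain);
+//   pStepper->Step(fp, false /* step over, not in */, &range,
+//                  1 /* cRange */, true /* range is in IL offsets */);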
+
+
+
+/* ------------------------------------------------------------------------- *
+ * DebuggerJMCStepper routines
+ * ------------------------------------------------------------------------- */
+class DebuggerJMCStepper : public DebuggerStepper
+{
+public:
+ DebuggerJMCStepper(Thread *thread,
+ CorDebugUnmappedStop rgfMappingStop,
+ CorDebugIntercept interceptStop,
+ AppDomain *appDomain);
+ ~DebuggerJMCStepper();
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_JMC_STEPPER; }
+
+ virtual void EnablePolyTraceCall();
+protected:
+ virtual void TrapStepNext(ControllerStackInfo *info);
+ virtual bool TrapStepInHelper(ControllerStackInfo * pInfo,
+ const BYTE * ipCallTarget,
+ const BYTE * ipNext,
+ bool fCallingIntoFunclet);
+ virtual bool IsInterestingFrame(FrameInfo * pFrame);
+ virtual void TriggerMethodEnter(Thread * thread, DebuggerJitInfo * dji, const BYTE * ip, FramePointer fp);
+ virtual bool DetectHandleNonUserCode(ControllerStackInfo *info, DebuggerMethodInfo * pInfo);
+ virtual bool DetectHandleInterceptors(ControllerStackInfo *info);
+
+
+private:
+
+};
+
+
+/* ------------------------------------------------------------------------- *
+ * DebuggerThreadStarter routines
+ * ------------------------------------------------------------------------- */
+// DebuggerThreadStarter: Once triggered, it sends the thread attach
+// message to the right side (where the CreateThread managed callback
+// gets called). It then promptly disappears, as its only purpose is to
+// alert the right side that a new thread has begun execution.
+class DebuggerThreadStarter : public DebuggerController
+{
+public:
+ DebuggerThreadStarter(Thread *thread);
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_THREAD_STARTER; }
+
+private:
+ TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+ void TriggerTraceCall(Thread *thread, const BYTE *ip);
+ bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
+};
+
+/* ------------------------------------------------------------------------- *
+ * DebuggerUserBreakpoint routines. UserBreakpoints are used
+ * by Runtime threads to notify the Right Side that they've hit
+ * a user breakpoint.
+ * ------------------------------------------------------------------------- */
+class DebuggerUserBreakpoint : public DebuggerStepper
+{
+public:
+ static void HandleDebugBreak(Thread * pThread);
+
+ static bool IsFrameInDebuggerNamespace(FrameInfo * pFrame);
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_USER_BREAKPOINT; }
+private:
+ // Don't construct these directly. Use HandleDebugBreak().
+ DebuggerUserBreakpoint(Thread *thread);
+
+
+ virtual bool IsInterestingFrame(FrameInfo * pFrame);
+
+ bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
+};
+
+/* ------------------------------------------------------------------------- *
+ * DebuggerFuncEvalComplete routines
+ * ------------------------------------------------------------------------- */
+class DebuggerFuncEvalComplete : public DebuggerController
+{
+public:
+ DebuggerFuncEvalComplete(Thread *thread,
+ void *dest);
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_FUNC_EVAL_COMPLETE; }
+
+private:
+ TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+ bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
+ DebuggerEval* m_pDE;
+};
+
+// continuable-exceptions
+/* ------------------------------------------------------------------------- *
+ * DebuggerContinuableExceptionBreakpoint routines
+ * ------------------------------------------------------------------------- *
+ *
+ * DebuggerContinuableExceptionBreakpoint: Implementation of Continuable Exception support uses this.
+ */
+class DebuggerContinuableExceptionBreakpoint : public DebuggerController
+{
+public:
+ DebuggerContinuableExceptionBreakpoint(Thread *pThread,
+ SIZE_T m_offset,
+ DebuggerJitInfo *jitInfo,
+ AppDomain *pAppDomain);
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_CONTINUABLE_EXCEPTION; }
+
+private:
+ TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+
+ bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
+};
+
+#ifdef EnC_SUPPORTED
+//---------------------------------------------------------------------------------------
+//
+// DebuggerEnCBreakpoint - used by edit and continue to support remapping
+//
+// When a method is updated, we make no immediate attempt to remap any existing execution
+// of the old method. Instead we mine the old method with EnC breakpoints, and prompt the
+// debugger whenever one is hit, giving it the opportunity to request a remap to the
+// latest version of the method.
+//
+// Over long debugging sessions which make many edits to large methods, we can create
+// a large number of these breakpoints. We currently make no attempt to reclaim the
+// code or patch overhead for old methods. Ideally we'd be able to detect when there are
+// no outstanding references to the old method version and clean up after it. At the
+// very least, we could remove all but the first patch when there are no outstanding
+// frames for a specific version of an edited method.
+//
+class DebuggerEnCBreakpoint : public DebuggerController
+{
+public:
+ // We have two types of EnC breakpoints. The first is the one we
+    // sprinkle through old code to let us know when execution is occurring
+    // in a function that now has a new version. The second is when we've
+    // actually resumed execution into a remapped function and we need
+ // to then notify the debugger.
+ enum TriggerType {REMAP_PENDING, REMAP_COMPLETE};
+
+ // Create and activate an EnC breakpoint at the specified native offset
+ DebuggerEnCBreakpoint(SIZE_T m_offset,
+ DebuggerJitInfo *jitInfo,
+ TriggerType fTriggerType,
+ AppDomain *pAppDomain);
+
+ virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
+ { return DEBUGGER_CONTROLLER_ENC; }
+
+private:
+ TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+
+ TP_RESULT HandleRemapComplete(DebuggerControllerPatch *patch,
+ Thread *thread,
+ TRIGGER_WHY tyWhy);
+
+ DebuggerJitInfo *m_jitInfo;
+ TriggerType m_fTriggerType;
+};
+#endif //EnC_SUPPORTED
+
+/* ========================================================================= */
+
+enum
+{
+ EVENTS_INIT_ALLOC = 5
+};
+
+class DebuggerControllerQueue
+{
+ DebuggerController **m_events;
+ DWORD m_dwEventsCount;
+ DWORD m_dwEventsAlloc;
+ DWORD m_dwNewEventsAlloc;
+
+public:
+ DebuggerControllerQueue()
+ : m_events(NULL),
+ m_dwEventsCount(0),
+ m_dwEventsAlloc(0),
+ m_dwNewEventsAlloc(0)
+ {
+ }
+
+
+ ~DebuggerControllerQueue()
+ {
+ if (m_events != NULL)
+ delete [] m_events;
+ }
+
+ BOOL dcqEnqueue(DebuggerController *dc, BOOL fSort)
+ {
+ LOG((LF_CORDB, LL_INFO100000,"DCQ::dcqE\n"));
+
+ _ASSERTE( dc != NULL );
+
+ if (m_dwEventsCount == m_dwEventsAlloc)
+ {
+ if (m_events == NULL)
+ m_dwNewEventsAlloc = EVENTS_INIT_ALLOC;
+ else
+ m_dwNewEventsAlloc = m_dwEventsAlloc<<1;
+
+ DebuggerController **newEvents = new (nothrow) DebuggerController * [m_dwNewEventsAlloc];
+
+ if (newEvents == NULL)
+ return FALSE;
+
+ if (m_events != NULL)
+ // The final argument to CopyMemory cannot over/underflow.
+ // The amount of memory copied has a strict upper bound of the size of the array,
+ // which cannot exceed the pointer size for the platform.
+ CopyMemory(newEvents, m_events, (SIZE_T)sizeof(*m_events) * (SIZE_T)m_dwEventsAlloc);
+
+ m_events = newEvents;
+ m_dwEventsAlloc = m_dwNewEventsAlloc;
+ }
+
+ dc->Enqueue();
+
+ // Make sure to place high priority patches into
+ // the event list first. This ensures, for
+ // example, that thread starts fire before
+ // breakpoints.
+ if (fSort && (m_dwEventsCount > 0))
+ {
+ DWORD i;
+ for (i = 0; i < m_dwEventsCount; i++)
+ {
+ _ASSERTE(m_events[i] != NULL);
+
+ if (m_events[i]->GetDCType() > dc->GetDCType())
+ {
+ // The final argument to CopyMemory cannot over/underflow.
+ // The amount of memory copied has a strict upper bound of the size of the array,
+ // which cannot exceed the pointer size for the platform.
+ MoveMemory(&m_events[i+1], &m_events[i], (SIZE_T)sizeof(DebuggerController*) * (SIZE_T)(m_dwEventsCount - i));
+ m_events[i] = dc;
+ break;
+ }
+ }
+
+ if (i == m_dwEventsCount)
+ m_events[m_dwEventsCount] = dc;
+
+ m_dwEventsCount++;
+ }
+ else
+ m_events[m_dwEventsCount++] = dc;
+
+ return TRUE;
+ }
+
+ DWORD dcqGetCount(void)
+ {
+ return m_dwEventsCount;
+ }
+
+ DebuggerController *dcqGetElement(DWORD dwElement)
+ {
+ LOG((LF_CORDB, LL_INFO100000,"DCQ::dcqGE\n"));
+
+ DebuggerController *dcp = NULL;
+
+ _ASSERTE(dwElement < m_dwEventsCount);
+ if (dwElement < m_dwEventsCount)
+ {
+ dcp = m_events[dwElement];
+ }
+
+ _ASSERTE(dcp != NULL);
+ return dcp;
+ }
+
+    // Somewhat wacky, but this actually releases entries in FILO order, not
+    // FIFO order. If we do this in an extra loop, the perf is better than
+    // sliding everything down by one each time.
+ void dcqDequeue(DWORD dw = 0xFFffFFff)
+ {
+ if (dw == 0xFFffFFff)
+ {
+ dw = (m_dwEventsCount - 1);
+ }
+
+ LOG((LF_CORDB, LL_INFO100000,"DCQ::dcqD element index "
+ "0x%x of 0x%x\n", dw, m_dwEventsCount));
+
+ _ASSERTE(dw < m_dwEventsCount);
+
+ m_events[dw]->Dequeue();
+
+ // Note that if we're taking the element off the end (m_dwEventsCount-1),
+ // the following will no-op.
+ // The final argument to MoveMemory cannot over/underflow.
+ // The amount of memory copied has a strict upper bound of the size of the array,
+ // which cannot exceed the pointer size for the platform.
+ MoveMemory(&(m_events[dw]),
+ &(m_events[dw + 1]),
+ (SIZE_T)sizeof(DebuggerController *) * (SIZE_T)(m_dwEventsCount - dw - 1));
+ m_dwEventsCount--;
+ }
+};
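+
+// Illustrative usage sketch (not part of the original source): how a dispatch
+// loop might fill and drain a DebuggerControllerQueue. Enqueuing with fSort
+// keeps higher-priority controllers (e.g. thread starters) ahead of
+// breakpoints, as described in dcqEnqueue; dequeuing from the back is the
+// FILO behavior noted above. The SendEvent call is hypothetical here, since
+// that method is protected and is normally reached via friend access.
+//
+//   DebuggerControllerQueue dcq;
+//   dcq.dcqEnqueue(pBreakpoint, TRUE);       // sorted by GetDCType()
+//   dcq.dcqEnqueue(pThreadStarter, TRUE);    // sorts ahead of the breakpoint
+//
+//   for (DWORD i = 0; i < dcq.dcqGetCount(); i++)
+//       dcq.dcqGetElement(i)->SendEvent(pThread, false);
+//
+//   while (dcq.dcqGetCount() > 0)
+//       dcq.dcqDequeue();                    // releases entries back-to-front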
+
+// Include all of the inline stuff now.
+#include "controller.inl"
+
+#endif // !DACCESS_COMPILE
+
+#endif /* CONTROLLER_H_ */
diff --git a/src/debug/ee/controller.inl b/src/debug/ee/controller.inl
new file mode 100644
index 0000000000..376c321da8
--- /dev/null
+++ b/src/debug/ee/controller.inl
@@ -0,0 +1,57 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: controller.inl
+//
+
+//
+// Inline definitions for the Left-Side of the CLR debugging services
+// This is logically part of the header file.
+//
+//*****************************************************************************
+
+#ifndef CONTROLLER_INL_
+#define CONTROLLER_INL_
+
+inline BOOL DebuggerControllerPatch::IsBreakpointPatch()
+{
+ return (controller->GetDCType() == DEBUGGER_CONTROLLER_BREAKPOINT);
+}
+
+inline BOOL DebuggerControllerPatch::IsStepperPatch()
+{
+ return (controller->IsStepperDCType());
+}
+
+inline DebuggerPatchKind DebuggerControllerPatch::GetKind()
+{
+ return kind;
+}
+inline BOOL DebuggerControllerPatch::IsILMasterPatch()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (kind == PATCH_KIND_IL_MASTER);
+}
+
+inline BOOL DebuggerControllerPatch::IsILSlavePatch()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (kind == PATCH_KIND_IL_SLAVE);
+}
+
+inline BOOL DebuggerControllerPatch::IsManagedPatch()
+{
+ return (IsILMasterPatch() || IsILSlavePatch() || kind == PATCH_KIND_NATIVE_MANAGED);
+
+}
+inline BOOL DebuggerControllerPatch::IsNativePatch()
+{
+ return (kind == PATCH_KIND_NATIVE_MANAGED || kind == PATCH_KIND_NATIVE_UNMANAGED || (IsILSlavePatch() && !offsetIsIL));
+
+}
+
+#endif // CONTROLLER_INL_
diff --git a/src/debug/ee/dac/.gitmirror b/src/debug/ee/dac/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/debug/ee/dac/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/debug/ee/dac/CMakeLists.txt b/src/debug/ee/dac/CMakeLists.txt
new file mode 100644
index 0000000000..9a48058b83
--- /dev/null
+++ b/src/debug/ee/dac/CMakeLists.txt
@@ -0,0 +1,4 @@
+
+include(${CLR_DIR}/dac.cmake)
+
+add_library(cordbee_dac ${CORDBEE_SOURCES_DAC})
diff --git a/src/debug/ee/dac/dirs.proj b/src/debug/ee/dac/dirs.proj
new file mode 100644
index 0000000000..8b766561f5
--- /dev/null
+++ b/src/debug/ee/dac/dirs.proj
@@ -0,0 +1,19 @@
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <!--Import the settings-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.props" />
+
+ <!--The following projects will build during PHASE 1-->
+ <PropertyGroup>
+ <BuildInPhase1>true</BuildInPhase1>
+ <BuildInPhaseDefault>false</BuildInPhaseDefault>
+ <BuildCoreBinaries>true</BuildCoreBinaries>
+ <BuildSysBinaries>true</BuildSysBinaries>
+ </PropertyGroup>
+
+ <ItemGroup Condition="'$(BuildExePhase)' == '1'">
+ <ProjectFile Include="HostLocal\dacwks.nativeproj" />
+ </ItemGroup>
+
+ <!--Import the targets-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\tools\Microsoft.DevDiv.Traversal.targets" />
+</Project>
diff --git a/src/debug/ee/datatest.h b/src/debug/ee/datatest.h
new file mode 100644
index 0000000000..e5483baca0
--- /dev/null
+++ b/src/debug/ee/datatest.h
@@ -0,0 +1,59 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: DataTest.h
+//
+
+//
+// Implement a self-test for the correct detection of when the target holds a
+// lock we encounter in the DAC.
+//
+//*****************************************************************************
+
+#ifndef DATA_TEST_H
+#define DATA_TEST_H
+
+// This class is used to test our ability to detect from the RS when the target has taken a lock.
+// When the DAC executes a code path that takes a lock, we need to know if the target is holding it.
+// If it is, then we assume that the locked data is in an inconsistent state. In that case, we don't
+// want to report the data; we just want to throw an exception.
+// The functionality in this class lets us take a lock on the LS and then signal the RS to try to
+// detect whether the lock is held. The main function in this class is TestDataSafety. It deterministically
+// signals the RS at key points to execute a code path that takes a lock and also passes a flag to indicate
+// whether the LS actually holds the lock. With this information, we can ascertain that our lock detection
+// code is working correctly. Without this special test function, it would be nearly impossible to test this
+// in any kind of deterministic way.
+//
+// The test will run in either debug or retail builds, as long as the environment variable TestDataConsistency
+// is turned on. It runs once in code:Debugger::Startup. The RS part of the test is in the cases
+// DB_IPCE_TEST_CRST and DB_IPCE_TEST_RWLOCK in code:CordbProcess::RawDispatchEvent.
+class DataTest
+{
+public:
+ // constructor
+ DataTest():
+ m_crst1(CrstDataTest1),
+ m_crst2(CrstDataTest2),
+ m_rwLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT) {};
+
+ // Takes a series of locks in various ways and signals the RS to test the locks at interesting
+ // points to ensure we reliably detect when the LS holds a lock.
+ void TestDataSafety();
+private:
+ // Send an event to the RS to signal that it should test to determine if a crst is held.
+ // This is for testing purposes only.
+ void SendDbgCrstEvent(Crst * pCrst, bool okToTake);
+
+ // Send an event to the RS to signal that it should test to determine if a SimpleRWLock is held.
+ // This is for testing purposes only.
+ void SendDbgRWLockEvent(SimpleRWLock * pRWLock, bool okToTake);
+
+private:
+ // The locks must be data members (rather than locals in TestDataSafety) so we can ensure that
+ // they are target instances.
+ Crst m_crst1, m_crst2; // crsts to be taken for testing
+ SimpleRWLock m_rwLock; // SimpleRWLock to be taken for testing
+};
+#endif // DATA_TEST_H
diff --git a/src/debug/ee/dbgtransportproxy.cpp b/src/debug/ee/dbgtransportproxy.cpp
new file mode 100644
index 0000000000..7847d9aff3
--- /dev/null
+++ b/src/debug/ee/dbgtransportproxy.cpp
@@ -0,0 +1,122 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#include "stdafx.h"
+#include "dbgtransportsession.h"
+#include "dbgtransportproxy.h"
+#include "dbgproxy.h"
+
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+
+//
+// Provides access to the debugging proxy process from the left side.
+//
+
+DbgTransportProxy::DbgTransportProxy()
+{
+ memset(this, 0, sizeof(*this));
+}
+
+// Startup and shutdown. Initialization takes the port number (in host byte order) that the left side will
+// wait on for debugger connections.
+HRESULT DbgTransportProxy::Init(unsigned short usPort)
+{
+    // Query the debugger configuration for the current user; this will give us the port number the proxy is
+    // using. By the time this method is called we know that debugging is enabled for this process.
+ DbgConfiguration sDbgConfig;
+ if (!GetDebuggerConfiguration(&sDbgConfig))
+ return E_OUTOFMEMORY;
+ _ASSERTE(sDbgConfig.m_fEnabled);
+ m_usProxyPort = sDbgConfig.m_usProxyPort;
+
+ m_usPort = usPort;
+
+ // Initialize some data the proxy needs when we register.
+ m_uiPID = GetCurrentProcessId();
+
+ // Allocate the connection manager and initialize it.
+ m_pConnectionManager = AllocateSecConnMgr();
+ if (m_pConnectionManager == NULL)
+ return E_OUTOFMEMORY;
+
+ SecConnStatus eStatus = m_pConnectionManager->Initialize();
+ if (eStatus != SCS_Success)
+ return eStatus == SCS_OutOfMemory ? E_OUTOFMEMORY : E_FAIL;
+
+ return S_OK;
+}
+
+void DbgTransportProxy::Shutdown()
+{
+ if (m_pConnectionManager)
+ m_pConnectionManager->Destroy();
+}
+
+// Talk with the proxy process and register this instantiation of the runtime with it. The reply from the
+// proxy will indicate whether a debugger wishes to attach to us before any managed code is allowed to
+// run. This method is synchronous and will wait for the reply from the proxy (or a timeout).
+DbgProxyResult DbgTransportProxy::RegisterWithProxy()
+{
+ // Attempt a connection to the proxy. Any failure is treated as the proxy not being there. No time for
+    // retries and timeouts; we're holding up process startup.
+ SecConn *pConnection = NULL;
+ SecConnStatus eStatus = m_pConnectionManager->AllocateConnection(DBGIPC_NTOHL(inet_addr("127.0.0.1")),
+ m_usProxyPort,
+ &pConnection);
+ if (eStatus == SCS_Success)
+ eStatus = pConnection->Connect();
+
+ if (eStatus != SCS_Success)
+ {
+ DbgTransportLog(LC_Proxy, "DbgTransportProxy::RegisterWithProxy(): failed to connect to proxy");
+ if (pConnection)
+ pConnection->Destroy();
+ return RequestTimedOut;
+ }
+
+ // Format a registration message for the proxy.
+ DbgProxyRegisterRuntimeMessage sRequest;
+ sRequest.m_sHeader.m_eType = DPMT_RegisterRuntime;
+ sRequest.m_sHeader.m_uiRequestID = 0;
+ sRequest.m_sHeader.m_uiMagic = DBGPROXY_MAGIC_VALUE(&sRequest.m_sHeader);
+ sRequest.m_sHeader.m_uiReserved = 0;
+ sRequest.m_uiMajorVersion = kCurrentMajorVersion;
+ sRequest.m_uiMinorVersion = kCurrentMinorVersion;
+ sRequest.m_uiPID = m_uiPID;
+ sRequest.m_usPort = m_usPort;
+
+ // Send the message. If we can't even do that we act as though the proxy timed out on us (runtime startup
+ // will continue and this process will not be debuggable).
+ if (!pConnection->Send((unsigned char*)&sRequest, sizeof(sRequest)))
+ {
+ DbgTransportLog(LC_Proxy, "DbgTransportProxy::RegisterWithProxy(): failed to send registration to proxy");
+ return RequestTimedOut;
+ }
+
+ // Wait for the reply.
+ DbgProxyMessageHeader sReply;
+ if (!pConnection->Receive((unsigned char*)&sReply, sizeof(sReply)))
+ {
+ DbgTransportLog(LC_Proxy, "DbgTransportProxy::RegisterWithProxy(): failed to receive reply from proxy");
+ return RequestTimedOut;
+ }
+
+ // Validate reply.
+ if (sReply.m_eType != DPMT_RuntimeRegistered ||
+ sReply.VariantData.RuntimeRegistered.m_uiMajorVersion != (unsigned)kCurrentMajorVersion ||
+ sReply.m_uiMagic != DBGPROXY_MAGIC_VALUE(&sReply))
+ {
+ DbgTransportLog(LC_Proxy, "DbgTransportProxy::RegisterWithProxy(): bad reply from the proxy");
+ return RequestTimedOut;
+ }
+
+ bool fWaitForDebugger = sReply.VariantData.RuntimeRegistered.m_fWaitForDebuggerAttach;
+ DbgTransportLog(LC_Proxy, "DbgTransportProxy::RegisterWithProxy(): %s for the debugger",
+ fWaitForDebugger ? "Waiting" : "Not waiting");
+ return fWaitForDebugger ? PendingDebuggerAttach : RequestSuccessful;
+}
+
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
diff --git a/src/debug/ee/dbgtransportproxy.h b/src/debug/ee/dbgtransportproxy.h
new file mode 100644
index 0000000000..25f8a501b3
--- /dev/null
+++ b/src/debug/ee/dbgtransportproxy.h
@@ -0,0 +1,51 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+
+#ifndef __DBG_TRANSPORT_PROXY_INCLUDED
+#define __DBG_TRANSPORT_PROXY_INCLUDED
+
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+
+#include "dbgproxy.h"
+
+//
+// Provides access to the debugging proxy process from the left side.
+//
+
+// The answers the proxy can give to us during runtime startup.
+enum DbgProxyResult
+{
+ RequestSuccessful, // Successfully registered the runtime, no debugger is currently interested in us
+ RequestTimedOut, // Timed-out trying to reach the proxy (it's probably not configured or started)
+ PendingDebuggerAttach // Successfully registered the runtime, a debugger wishes to attach before code is run
+};
+
+class DbgTransportProxy
+{
+public:
+ DbgTransportProxy();
+
+ // Startup and shutdown. Initialization takes the port number (in host byte order) that the left side
+ // will wait on for debugger connections.
+ HRESULT Init(unsigned short usPort);
+ void Shutdown();
+
+ // Talk with the proxy process and register this instantiation of the runtime with it. The reply from the
+ // proxy will indicate whether a debugger wishes to attach to us before any managed code is allowed to
+ // run. This method is synchronous and will wait for the reply from the proxy (or a timeout).
+ DbgProxyResult RegisterWithProxy();
+
+private:
+ unsigned int m_uiPID; // PID of the current process
+ unsigned short m_usPort; // Port the LS waits on for debugger connections
+ unsigned short m_usProxyPort; // Port the proxy waits on for requests
+
+ SecConnMgr *m_pConnectionManager; // Factory for network connections
+};
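+
+// Illustrative usage sketch (not part of the original source): the call
+// sequence the left side would follow at startup when transport debugging is
+// enabled. The port number is hypothetical; per RegisterWithProxy above, a
+// PendingDebuggerAttach result means a debugger wants to attach before any
+// managed code runs.
+//
+//   DbgTransportProxy proxy;
+//   if (SUCCEEDED(proxy.Init(4021)))     // port the LS waits on for debuggers
+//   {
+//       if (proxy.RegisterWithProxy() == PendingDebuggerAttach)
+//       {
+//           // hold off running managed code until the debugger attaches
+//       }
+//   }
+//   proxy.Shutdown();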
+
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+#endif // __DBG_TRANSPORT_PROXY_INCLUDED
diff --git a/src/debug/ee/ddunpack.cpp b/src/debug/ee/ddunpack.cpp
new file mode 100644
index 0000000000..90b65c8a98
--- /dev/null
+++ b/src/debug/ee/ddunpack.cpp
@@ -0,0 +1,4578 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// Unpacker
+//
+// Lives on EE side of the fence
+//
+// Note that this file is generated by ndp\clr\src\Debug\tools\BuildDDMarshal\.
+// Changes should be made to output\DDUnpack_template.cpp in that directory.
+//
+
+
+#include "stdafx.h"
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+#include "dacdbiinterface.h"
+
+#include "ddshared.h"
+
+#include "ddmarshalutil.h"
+
+#include "ddunpack.h"
+
+#include "../shared/stringcopyholder.cpp"
+
+// Suppress PREFast warning about overly large function
+// These functions are automatically generated.
+#if defined(_PREFAST_)
+#pragma warning(disable:21000)
+#endif
+
+// General callback used for enumeration callback functions.
+template <class T>
+void GeneralEnumerationCallback(T vmAppDomain, void * pUserData)
+{
+ WriteBuffer * pResult = (WriteBuffer *) pUserData;
+
+ DWORD dw = 1; // Continue
+ WriteToBuffer(pResult, dw);
+ WriteToBuffer(pResult, vmAppDomain);
+}
+
+
+
+
+//
+// These stubs are called by the handler
+//
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT CheckDbiVersion(const DbiVersion * pVersion)
+void DDUnpack::Unpack_CheckDbiVersion(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ DbiVersion _pVersion; // storage
+ const DbiVersion * pVersion = &_pVersion;
+ ReadFromBuffer(pSend, &_pVersion); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->CheckDbiVersion(pVersion); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method CheckDbiVersion
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetLocalInterfaceHashAndTimestamp(DWORD & hash1, DWORD & hash2, DWORD & hash3, DWORD & hash4, DWORD & timestamp1, DWORD & timestamp2)
+void DDUnpack::Unpack_GetLocalInterfaceHashAndTimestamp(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+ // Callbacks not yet implemented
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetLocalInterfaceHashAndTimestamp
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetRemoteInterfaceHashAndTimestamp(DWORD & hash1, DWORD & hash2, DWORD & hash3, DWORD & hash4, DWORD & timestamp1, DWORD & timestamp2)
+void DDUnpack::Unpack_GetRemoteInterfaceHashAndTimestamp(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ DWORD hash1;
+ // hash1 does not need to be copied on input
+ DWORD hash2;
+ // hash2 does not need to be copied on input
+ DWORD hash3;
+ // hash3 does not need to be copied on input
+ DWORD hash4;
+ // hash4 does not need to be copied on input
+ DWORD timestamp1;
+ // timestamp1 does not need to be copied on input
+ DWORD timestamp2;
+ // timestamp2 does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ hash1 = 0xe5ffdbe6;
+ hash2 = 0xf26b43be;
+ hash3 = 0x6c9685ac;
+ hash4 = 0xdd723940;
+ timestamp1 = 0x1cc67fb;
+ timestamp2 = 0xe3ad5a06;
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hash1);
+ WriteToBuffer(pResult, hash2);
+ WriteToBuffer(pResult, hash3);
+ WriteToBuffer(pResult, hash4);
+ WriteToBuffer(pResult, timestamp1);
+ WriteToBuffer(pResult, timestamp2);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetRemoteInterfaceHashAndTimestamp
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT FlushCache()
+void DDUnpack::Unpack_FlushCache(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->FlushCache(); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method FlushCache
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void DacSetTargetConsistencyChecks(bool fEnableAsserts)
+void DDUnpack::Unpack_DacSetTargetConsistencyChecks(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ bool fEnableAsserts;
+ ReadFromBuffer(pSend, fEnableAsserts);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->DacSetTargetConsistencyChecks(fEnableAsserts); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method DacSetTargetConsistencyChecks
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void Destroy()
+void DDUnpack::Unpack_Destroy(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->Destroy(); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method Destroy
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsLeftSideInitialized()
+void DDUnpack::Unpack_IsLeftSideInitialized(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsLeftSideInitialized(); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsLeftSideInitialized
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_AppDomain GetAppDomainFromId(ULONG appdomainId)
+void DDUnpack::Unpack_GetAppDomainFromId(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ ULONG appdomainId;
+ ReadFromBuffer(pSend, appdomainId);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_AppDomain _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetAppDomainFromId(appdomainId); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAppDomainFromId
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// ULONG GetAppDomainId(VMPTR_AppDomain vmAppDomain)
+void DDUnpack::Unpack_GetAppDomainId(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ ULONG _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetAppDomainId(vmAppDomain); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAppDomainId
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_OBJECTHANDLE GetAppDomainObject(VMPTR_AppDomain vmAppDomain)
+void DDUnpack::Unpack_GetAppDomainObject(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_OBJECTHANDLE _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetAppDomainObject(vmAppDomain); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAppDomainObject
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsDefaultDomain(VMPTR_AppDomain vmAppDomain)
+void DDUnpack::Unpack_IsDefaultDomain(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsDefaultDomain(vmAppDomain); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsDefaultDomain
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetAssemblyFromDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, VMPTR_Assembly * vmAssembly)
+void DDUnpack::Unpack_GetAssemblyFromDomainAssembly(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainAssembly vmDomainAssembly;
+ ReadFromBuffer(pSend, vmDomainAssembly);
+ VMPTR_Assembly _vmAssembly; // storage
+ VMPTR_Assembly * vmAssembly = &_vmAssembly;
+ // vmAssembly does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetAssemblyFromDomainAssembly(vmDomainAssembly, vmAssembly); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, vmAssembly);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAssemblyFromDomainAssembly
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsAssemblyFullyTrusted(VMPTR_DomainAssembly vmDomainAssembly)
+void DDUnpack::Unpack_IsAssemblyFullyTrusted(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainAssembly vmDomainAssembly;
+ ReadFromBuffer(pSend, vmDomainAssembly);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsAssemblyFullyTrusted(vmDomainAssembly); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsAssemblyFullyTrusted
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetAppDomainFullName(VMPTR_AppDomain vmAppDomain, IStringHolder * pStrName)
+void DDUnpack::Unpack_GetAppDomainFullName(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ StringCopyHolder _pStrName; // storage
+ StringCopyHolder* pStrName = &_pStrName;
+ // pStrName does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetAppDomainFullName(vmAppDomain, pStrName); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pStrName);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAppDomainFullName
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetModuleSimpleName(VMPTR_Module vmModule, IStringHolder * pStrFilename)
+void DDUnpack::Unpack_GetModuleSimpleName(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Module vmModule;
+ ReadFromBuffer(pSend, vmModule);
+ StringCopyHolder _pStrFilename; // storage
+ StringCopyHolder* pStrFilename = &_pStrFilename;
+ // pStrFilename does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetModuleSimpleName(vmModule, pStrFilename); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pStrFilename);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetModuleSimpleName
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL GetAssemblyPath(VMPTR_Assembly vmAssembly, IStringHolder * pStrFilename)
+void DDUnpack::Unpack_GetAssemblyPath(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Assembly vmAssembly;
+ ReadFromBuffer(pSend, vmAssembly);
+ StringCopyHolder _pStrFilename; // storage
+ StringCopyHolder* pStrFilename = &_pStrFilename;
+ // pStrFilename does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetAssemblyPath(vmAssembly, pStrFilename); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pStrFilename);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAssemblyPath
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void ResolveTypeReference(const TypeRefData * pTypeRefInfo, TypeRefData * pTargetRefInfo)
+void DDUnpack::Unpack_ResolveTypeReference(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ TypeRefData _pTypeRefInfo; // storage
+ const TypeRefData * pTypeRefInfo = &_pTypeRefInfo;
+ ReadFromBuffer(pSend, &_pTypeRefInfo); // serialize to storage
+ TypeRefData _pTargetRefInfo; // storage
+ TypeRefData * pTargetRefInfo = &_pTargetRefInfo;
+ ReadFromBuffer(pSend, &_pTargetRefInfo); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->ResolveTypeReference(pTypeRefInfo, pTargetRefInfo); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pTargetRefInfo);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method ResolveTypeReference
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL GetModulePath(VMPTR_Module vmModule, IStringHolder * pStrFilename)
+void DDUnpack::Unpack_GetModulePath(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Module vmModule;
+ ReadFromBuffer(pSend, vmModule);
+ StringCopyHolder _pStrFilename; // storage
+ StringCopyHolder* pStrFilename = &_pStrFilename;
+ // pStrFilename does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetModulePath(vmModule, pStrFilename); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pStrFilename);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetModulePath
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL GetModuleNGenPath(VMPTR_Module vmModule, IStringHolder * pStrFilename)
+void DDUnpack::Unpack_GetModuleNGenPath(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Module vmModule;
+ ReadFromBuffer(pSend, vmModule);
+ StringCopyHolder _pStrFilename; // storage
+ StringCopyHolder* pStrFilename = &_pStrFilename;
+ // pStrFilename does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetModuleNGenPath(vmModule, pStrFilename); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pStrFilename);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetModuleNGenPath
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetMetadata(VMPTR_Module vmModule, TargetBuffer * pTargetBuffer)
+void DDUnpack::Unpack_GetMetadata(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Module vmModule;
+ ReadFromBuffer(pSend, vmModule);
+ TargetBuffer _pTargetBuffer; // storage
+ TargetBuffer * pTargetBuffer = &_pTargetBuffer;
+ // pTargetBuffer does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetMetadata(vmModule, pTargetBuffer); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pTargetBuffer);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetMetadata
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetSymbolsBuffer(VMPTR_Module vmModule, TargetBuffer * pTargetBuffer, IDacDbiInterface::SymbolFormat * pSymbolFormat)
+void DDUnpack::Unpack_GetSymbolsBuffer(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Module vmModule;
+ ReadFromBuffer(pSend, vmModule);
+ TargetBuffer _pTargetBuffer; // storage
+ TargetBuffer * pTargetBuffer = &_pTargetBuffer;
+ // pTargetBuffer does not need to be copied on input
+ IDacDbiInterface::SymbolFormat _pSymbolFormat; // storage
+ IDacDbiInterface::SymbolFormat * pSymbolFormat = &_pSymbolFormat;
+ // pSymbolFormat does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetSymbolsBuffer(vmModule, pTargetBuffer, pSymbolFormat); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pTargetBuffer);
+ WriteToBuffer(pResult, pSymbolFormat);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetSymbolsBuffer
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetModuleData(VMPTR_Module vmModule, ModuleInfo * pData)
+void DDUnpack::Unpack_GetModuleData(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Module vmModule;
+ ReadFromBuffer(pSend, vmModule);
+ ModuleInfo _pData; // storage
+ ModuleInfo * pData = &_pData;
+ // pData does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetModuleData(vmModule, pData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pData);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetModuleData
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetDomainFileData(VMPTR_DomainFile vmDomainFile, DomainFileInfo * pData)
+void DDUnpack::Unpack_GetDomainFileData(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainFile vmDomainFile;
+ ReadFromBuffer(pSend, vmDomainFile);
+ DomainFileInfo _pData; // storage
+ DomainFileInfo * pData = &_pData;
+ // pData does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetDomainFileData(vmDomainFile, pData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pData);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetDomainFileData
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetModuleForDomainFile(VMPTR_DomainFile vmDomainFile, VMPTR_Module * pModule)
+void DDUnpack::Unpack_GetModuleForDomainFile(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainFile vmDomainFile;
+ ReadFromBuffer(pSend, vmDomainFile);
+ VMPTR_Module _pModule; // storage
+ VMPTR_Module * pModule = &_pModule;
+ // pModule does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetModuleForDomainFile(vmDomainFile, pModule); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pModule);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetModuleForDomainFile
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// IDacDbiInterface::AddressType GetAddressType(CORDB_ADDRESS address)
+void DDUnpack::Unpack_GetAddressType(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS address;
+ ReadFromBuffer(pSend, address);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ IDacDbiInterface::AddressType _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetAddressType(address); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAddressType
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsTransitionStub(CORDB_ADDRESS address)
+void DDUnpack::Unpack_IsTransitionStub(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS address;
+ ReadFromBuffer(pSend, address);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsTransitionStub(address); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsTransitionStub
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetCompilerFlags(VMPTR_DomainFile vmDomainFile, BOOL * pfAllowJITOpts, BOOL * pfEnableEnC)
+void DDUnpack::Unpack_GetCompilerFlags(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainFile vmDomainFile;
+ ReadFromBuffer(pSend, vmDomainFile);
+ BOOL _pfAllowJITOpts; // storage
+ BOOL * pfAllowJITOpts = &_pfAllowJITOpts;
+ // pfAllowJITOpts does not need to be copied on input
+ BOOL _pfEnableEnC; // storage
+ BOOL * pfEnableEnC = &_pfEnableEnC;
+ // pfEnableEnC does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetCompilerFlags(vmDomainFile, pfAllowJITOpts, pfEnableEnC); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pfAllowJITOpts);
+ WriteToBuffer(pResult, pfEnableEnC);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetCompilerFlags
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT SetCompilerFlags(VMPTR_DomainFile vmDomainFile, BOOL fAllowJitOpts, BOOL fEnableEnC)
+void DDUnpack::Unpack_SetCompilerFlags(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainFile vmDomainFile;
+ ReadFromBuffer(pSend, vmDomainFile);
+ BOOL fAllowJitOpts;
+ ReadFromBuffer(pSend, fAllowJitOpts);
+ BOOL fEnableEnC;
+ ReadFromBuffer(pSend, fEnableEnC);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->SetCompilerFlags(vmDomainFile, fAllowJitOpts, fEnableEnC); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method SetCompilerFlags
+
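+// The enumeration stubs (EnumerateAppDomains, EnumerateAssembliesInAppDomain,
+// EnumerateModulesInAssembly, EnumerateThreads, EnumerateInternalFrames) do not
+// marshal the caller's callback; instead they pass GeneralEnumerationCallback
+// (defined elsewhere in this file) with pResult as the callback data, which
+// presumably appends each enumerated item to the reply buffer. The sentinel
+// DWORD of 2 ("Stop") written after the call marks the end of that item stream,
+// so the reader knows that only the HRESULT follows.
+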
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void EnumerateAppDomains(IDacDbiInterface::FP_APPDOMAIN_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+void DDUnpack::Unpack_EnumerateAppDomains(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // Extra stuff for callback handlers
+ m_pReal->EnumerateAppDomains(GeneralEnumerationCallback, pResult);
+
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // Sentinel for callback list
+ DWORD dw = 2; // Stop
+ WriteToBuffer(pResult, dw);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnumerateAppDomains
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void EnumerateAssembliesInAppDomain(VMPTR_AppDomain vmAppDomain, IDacDbiInterface::FP_ASSEMBLY_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+void DDUnpack::Unpack_EnumerateAssembliesInAppDomain(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // Extra stuff for callback handlers
+ m_pReal->EnumerateAssembliesInAppDomain(vmAppDomain, GeneralEnumerationCallback, pResult);
+
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // Sentinel for callback list
+ DWORD dw = 2; // Stop
+ WriteToBuffer(pResult, dw);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnumerateAssembliesInAppDomain
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void EnumerateModulesInAssembly(VMPTR_DomainAssembly vmAssembly, IDacDbiInterface::FP_MODULE_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+void DDUnpack::Unpack_EnumerateModulesInAssembly(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainAssembly vmAssembly;
+ ReadFromBuffer(pSend, vmAssembly);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // Extra stuff for callback handlers
+ m_pReal->EnumerateModulesInAssembly(vmAssembly, GeneralEnumerationCallback, pResult);
+
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // Sentinel for callback list
+ DWORD dw = 2; // Stop
+ WriteToBuffer(pResult, dw);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnumerateModulesInAssembly
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void RequestSyncAtEvent()
+void DDUnpack::Unpack_RequestSyncAtEvent(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->RequestSyncAtEvent(); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method RequestSyncAtEvent
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void MarkDebuggerAttachPending()
+void DDUnpack::Unpack_MarkDebuggerAttachPending(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->MarkDebuggerAttachPending(); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method MarkDebuggerAttachPending
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void MarkDebuggerAttached(BOOL fAttached)
+void DDUnpack::Unpack_MarkDebuggerAttached(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ BOOL fAttached;
+ ReadFromBuffer(pSend, fAttached);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->MarkDebuggerAttached(fAttached); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method MarkDebuggerAttached
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void Hijack(VMPTR_Thread vmThread, ULONG32 dwThreadId, const EXCEPTION_RECORD * pRecord, T_CONTEXT * pOriginalContext, ULONG32 cbSizeContext, EHijackReason::EHijackReason reason, void * pUserData, CORDB_ADDRESS * pRemoteContextAddr)
+void DDUnpack::Unpack_Hijack(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+ // Callbacks not yet implemented
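+    // Note: unlike the other stubs, this is an empty placeholder - nothing is read
+    // from pSend and no reply (not even an hr) is written to pResult. The same is
+    // true of Unpack_EnumerateConnections and Unpack_GetThreadHandle below.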
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method Hijack
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void EnumerateConnections(IDacDbiInterface::FP_CONNECTION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+void DDUnpack::Unpack_EnumerateConnections(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+ // Callbacks not yet implemented
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnumerateConnections
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void EnumerateThreads(IDacDbiInterface::FP_THREAD_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+void DDUnpack::Unpack_EnumerateThreads(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // Extra stuff for callback handlers
+ m_pReal->EnumerateThreads(GeneralEnumerationCallback, pResult);
+
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // Sentinel for callback list
+ DWORD dw = 2; // Stop
+ WriteToBuffer(pResult, dw);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnumerateThreads
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// bool IsThreadMarkedDead(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_IsThreadMarkedDead(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ bool _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsThreadMarkedDead(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsThreadMarkedDead
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HANDLE GetThreadHandle(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetThreadHandle(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+ // Callbacks not yet implemented
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetThreadHandle
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_OBJECTHANDLE GetThreadObject(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetThreadObject(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_OBJECTHANDLE _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetThreadObject(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetThreadObject
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void SetDebugState(VMPTR_Thread vmThread, CorDebugThreadState debugState)
+void DDUnpack::Unpack_SetDebugState(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ CorDebugThreadState debugState;
+ ReadFromBuffer(pSend, debugState);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->SetDebugState(vmThread, debugState); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method SetDebugState
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL HasUnhandledException(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_HasUnhandledException(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->HasUnhandledException(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method HasUnhandledException
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// CorDebugUserState GetUserState(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetUserState(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ CorDebugUserState _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetUserState(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetUserState
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// CONNID GetConnectionID(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetConnectionID(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ CONNID _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetConnectionID(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetConnectionID
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// TASKID GetTaskID(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetTaskID(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ TASKID _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetTaskID(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetTaskID
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// DWORD TryGetVolatileOSThreadID(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_TryGetVolatileOSThreadID(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ DWORD _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->TryGetVolatileOSThreadID(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method TryGetVolatileOSThreadID
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// DWORD GetUniqueThreadID(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetUniqueThreadID(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ DWORD _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetUniqueThreadID(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetUniqueThreadID
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_OBJECTHANDLE GetCurrentException(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetCurrentException(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_OBJECTHANDLE _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetCurrentException(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetCurrentException
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_OBJECTHANDLE GetCurrentCustomDebuggerNotification(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetCurrentCustomDebuggerNotification(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_OBJECTHANDLE _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetCurrentCustomDebuggerNotification(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetCurrentCustomDebuggerNotification
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_AppDomain GetCurrentAppDomain(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetCurrentAppDomain(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_AppDomain _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetCurrentAppDomain(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetCurrentAppDomain
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_DomainAssembly ResolveAssembly(VMPTR_DomainFile vmScope, mdToken tkAssemblyRef)
+void DDUnpack::Unpack_ResolveAssembly(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainFile vmScope;
+ ReadFromBuffer(pSend, vmScope);
+ mdToken tkAssemblyRef;
+ ReadFromBuffer(pSend, tkAssemblyRef);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_DomainAssembly _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->ResolveAssembly(vmScope, tkAssemblyRef); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method ResolveAssembly
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetNativeCodeSequencePointsAndVarInfo(VMPTR_MethodDesc vmMethodDesc, CORDB_ADDRESS startAddress, BOOL fCodeAvailabe, NativeVarData * pNativeVarData, SequencePoints * pSequencePoints)
+void DDUnpack::Unpack_GetNativeCodeSequencePointsAndVarInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_MethodDesc vmMethodDesc;
+ ReadFromBuffer(pSend, vmMethodDesc);
+ CORDB_ADDRESS startAddress;
+ ReadFromBuffer(pSend, startAddress);
+ BOOL fCodeAvailabe;
+ ReadFromBuffer(pSend, fCodeAvailabe);
+ NativeVarData _pNativeVarData; // storage
+ NativeVarData * pNativeVarData = &_pNativeVarData;
+ // pNativeVarData does not need to be copied on input
+ SequencePoints _pSequencePoints; // storage
+ SequencePoints * pSequencePoints = &_pSequencePoints;
+ // pSequencePoints does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetNativeCodeSequencePointsAndVarInfo(vmMethodDesc, startAddress, fCodeAvailabe, pNativeVarData, pSequencePoints); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pNativeVarData);
+ WriteToBuffer(pResult, pSequencePoints);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetNativeCodeSequencePointsAndVarInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_CONTEXT GetManagedStoppedContext(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetManagedStoppedContext(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_CONTEXT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetManagedStoppedContext(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetManagedStoppedContext
+
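+// In Unpack_CreateStackWalk and the Get/SetStackWalkCurrentContext stubs that
+// follow, the DT_CONTEXT argument is in/out: it is unpacked from the request into
+// local storage before the call and its (possibly updated) contents are packed
+// back into the reply afterwards. The const DT_CONTEXT arguments (CheckContext,
+// IsLeafFrame) are input-only and are not copied back.
+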
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void CreateStackWalk(VMPTR_Thread vmThread, DT_CONTEXT * pInternalContextBuffer, StackWalkHandle * ppSFIHandle)
+void DDUnpack::Unpack_CreateStackWalk(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ DT_CONTEXT _pInternalContextBuffer; // storage
+ DT_CONTEXT * pInternalContextBuffer = &_pInternalContextBuffer;
+ ReadFromBuffer(pSend, &_pInternalContextBuffer); // serialize to storage
+ StackWalkHandle _ppSFIHandle; // storage
+ StackWalkHandle * ppSFIHandle = &_ppSFIHandle;
+ // ppSFIHandle does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->CreateStackWalk(vmThread, pInternalContextBuffer, ppSFIHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pInternalContextBuffer);
+ WriteToBuffer(pResult, ppSFIHandle);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method CreateStackWalk
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void DeleteStackWalk(StackWalkHandle ppSFIHandle)
+void DDUnpack::Unpack_DeleteStackWalk(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ StackWalkHandle ppSFIHandle;
+ ReadFromBuffer(pSend, ppSFIHandle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->DeleteStackWalk(ppSFIHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method DeleteStackWalk
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetStackWalkCurrentContext(StackWalkHandle pSFIHandle, DT_CONTEXT * pContext)
+void DDUnpack::Unpack_GetStackWalkCurrentContext(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ StackWalkHandle pSFIHandle;
+ ReadFromBuffer(pSend, pSFIHandle);
+ DT_CONTEXT _pContext; // storage
+ DT_CONTEXT * pContext = &_pContext;
+ ReadFromBuffer(pSend, &_pContext); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetStackWalkCurrentContext(pSFIHandle, pContext); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pContext);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetStackWalkCurrentContext
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void SetStackWalkCurrentContext(VMPTR_Thread vmThread, StackWalkHandle pSFIHandle, CorDebugSetContextFlag flag, DT_CONTEXT * pContext)
+void DDUnpack::Unpack_SetStackWalkCurrentContext(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ StackWalkHandle pSFIHandle;
+ ReadFromBuffer(pSend, pSFIHandle);
+ CorDebugSetContextFlag flag;
+ ReadFromBuffer(pSend, flag);
+ DT_CONTEXT _pContext; // storage
+ DT_CONTEXT * pContext = &_pContext;
+ ReadFromBuffer(pSend, &_pContext); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->SetStackWalkCurrentContext(vmThread, pSFIHandle, flag, pContext); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pContext);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method SetStackWalkCurrentContext
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL UnwindStackWalkFrame(StackWalkHandle pSFIHandle)
+void DDUnpack::Unpack_UnwindStackWalkFrame(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ StackWalkHandle pSFIHandle;
+ ReadFromBuffer(pSend, pSFIHandle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->UnwindStackWalkFrame(pSFIHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method UnwindStackWalkFrame
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT CheckContext(VMPTR_Thread vmThread, const DT_CONTEXT * pContext)
+void DDUnpack::Unpack_CheckContext(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ DT_CONTEXT _pContext; // storage
+ const DT_CONTEXT * pContext = &_pContext;
+ ReadFromBuffer(pSend, &_pContext); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->CheckContext(vmThread, pContext); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method CheckContext
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// IDacDbiInterface::FrameType GetStackWalkCurrentFrameInfo(StackWalkHandle pSFIHandle, DebuggerIPCE_STRData * pFrameData)
+void DDUnpack::Unpack_GetStackWalkCurrentFrameInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ StackWalkHandle pSFIHandle;
+ ReadFromBuffer(pSend, pSFIHandle);
+ DebuggerIPCE_STRData _pFrameData; // storage
+ DebuggerIPCE_STRData * pFrameData = &_pFrameData;
+ ReadFromBuffer(pSend, &_pFrameData); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ IDacDbiInterface::FrameType _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetStackWalkCurrentFrameInfo(pSFIHandle, pFrameData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pFrameData);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetStackWalkCurrentFrameInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// ULONG32 GetCountOfInternalFrames(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_GetCountOfInternalFrames(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ ULONG32 _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetCountOfInternalFrames(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetCountOfInternalFrames
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void EnumerateInternalFrames(VMPTR_Thread vmThread, IDacDbiInterface::FP_INTERNAL_FRAME_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+void DDUnpack::Unpack_EnumerateInternalFrames(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // Extra stuff for callback handlers
+ m_pReal->EnumerateInternalFrames(vmThread, GeneralEnumerationCallback, pResult);
+
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // Sentinel for callback list
+ DWORD dw = 2; // Stop
+ WriteToBuffer(pResult, dw);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnumerateInternalFrames
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsMatchingParentFrame(FramePointer fpToCheck, FramePointer fpParent)
+void DDUnpack::Unpack_IsMatchingParentFrame(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ FramePointer fpToCheck;
+ ReadFromBuffer(pSend, fpToCheck);
+ FramePointer fpParent;
+ ReadFromBuffer(pSend, fpParent);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsMatchingParentFrame(fpToCheck, fpParent); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsMatchingParentFrame
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// ULONG32 GetStackParameterSize(CORDB_ADDRESS controlPC)
+void DDUnpack::Unpack_GetStackParameterSize(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS controlPC;
+ ReadFromBuffer(pSend, controlPC);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ ULONG32 _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetStackParameterSize(controlPC); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetStackParameterSize
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// FramePointer GetFramePointer(StackWalkHandle pSFIHandle)
+void DDUnpack::Unpack_GetFramePointer(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ StackWalkHandle pSFIHandle;
+ ReadFromBuffer(pSend, pSFIHandle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ FramePointer _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetFramePointer(pSFIHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetFramePointer
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsLeafFrame(VMPTR_Thread vmThread, const DT_CONTEXT * pContext)
+void DDUnpack::Unpack_IsLeafFrame(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ DT_CONTEXT _pContext; // storage
+ const DT_CONTEXT * pContext = &_pContext;
+ ReadFromBuffer(pSend, &_pContext); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsLeafFrame(vmThread, pContext); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsLeafFrame
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetContext(VMPTR_Thread vmThread, DT_CONTEXT * pContextBuffer)
+void DDUnpack::Unpack_GetContext(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ DT_CONTEXT _pContextBuffer; // storage
+ DT_CONTEXT * pContextBuffer = &_pContextBuffer;
+ ReadFromBuffer(pSend, &_pContextBuffer); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetContext(vmThread, pContextBuffer); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pContextBuffer);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetContext
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void ConvertContextToDebuggerRegDisplay(const DT_CONTEXT * pInContext, DebuggerREGDISPLAY * pOutDRD, BOOL fActive)
+void DDUnpack::Unpack_ConvertContextToDebuggerRegDisplay(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ DT_CONTEXT _pInContext; // storage
+ const DT_CONTEXT * pInContext = &_pInContext;
+ ReadFromBuffer(pSend, &_pInContext); // serialize to storage
+ DebuggerREGDISPLAY _pOutDRD; // storage
+ DebuggerREGDISPLAY * pOutDRD = &_pOutDRD;
+ ReadFromBuffer(pSend, &_pOutDRD); // serialize to storage
+ BOOL fActive;
+ ReadFromBuffer(pSend, fActive);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->ConvertContextToDebuggerRegDisplay(pInContext, pOutDRD, fActive); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pOutDRD);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method ConvertContextToDebuggerRegDisplay
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// IDacDbiInterface::DynamicMethodType IsILStubOrLCGMethod(VMPTR_MethodDesc vmMethodDesc)
+void DDUnpack::Unpack_IsILStubOrLCGMethod(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_MethodDesc vmMethodDesc;
+ ReadFromBuffer(pSend, vmMethodDesc);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ IDacDbiInterface::DynamicMethodType _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsILStubOrLCGMethod(vmMethodDesc); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsILStubOrLCGMethod
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// TargetBuffer GetVarArgSig(CORDB_ADDRESS VASigCookieAddr, CORDB_ADDRESS * pArgBase)
+void DDUnpack::Unpack_GetVarArgSig(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS VASigCookieAddr;
+ ReadFromBuffer(pSend, VASigCookieAddr);
+ CORDB_ADDRESS _pArgBase; // storage
+ CORDB_ADDRESS * pArgBase = &_pArgBase;
+ // pArgBase does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ TargetBuffer _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetVarArgSig(VASigCookieAddr, pArgBase); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pArgBase);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetVarArgSig
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL RequiresAlign8(VMPTR_TypeHandle thExact)
+void DDUnpack::Unpack_RequiresAlign8(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_TypeHandle thExact;
+ ReadFromBuffer(pSend, thExact);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->RequiresAlign8(thExact); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method RequiresAlign8
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// GENERICS_TYPE_TOKEN ResolveExactGenericArgsToken(DWORD dwExactGenericArgsTokenIndex, GENERICS_TYPE_TOKEN rawToken)
+void DDUnpack::Unpack_ResolveExactGenericArgsToken(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ DWORD dwExactGenericArgsTokenIndex;
+ ReadFromBuffer(pSend, dwExactGenericArgsTokenIndex);
+ GENERICS_TYPE_TOKEN rawToken;
+ ReadFromBuffer(pSend, rawToken);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ GENERICS_TYPE_TOKEN _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->ResolveExactGenericArgsToken(dwExactGenericArgsTokenIndex, rawToken); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method ResolveExactGenericArgsToken
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetILCodeAndSig(VMPTR_DomainFile vmDomainFile, mdToken functionToken, TargetBuffer * pCodeInfo, mdToken * pLocalSigToken)
+void DDUnpack::Unpack_GetILCodeAndSig(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainFile vmDomainFile;
+ ReadFromBuffer(pSend, vmDomainFile);
+ mdToken functionToken;
+ ReadFromBuffer(pSend, functionToken);
+ TargetBuffer _pCodeInfo; // storage
+ TargetBuffer * pCodeInfo = &_pCodeInfo;
+ // pCodeInfo does not need to be copied on input
+ mdToken _pLocalSigToken; // storage
+ mdToken * pLocalSigToken = &_pLocalSigToken;
+ // pLocalSigToken does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetILCodeAndSig(vmDomainFile, functionToken, pCodeInfo, pLocalSigToken); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pCodeInfo);
+ WriteToBuffer(pResult, pLocalSigToken);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetILCodeAndSig
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetNativeCodeInfo(VMPTR_DomainFile vmDomainFile, mdToken functionToken, NativeCodeFunctionData * pCodeInfo)
+void DDUnpack::Unpack_GetNativeCodeInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainFile vmDomainFile;
+ ReadFromBuffer(pSend, vmDomainFile);
+ mdToken functionToken;
+ ReadFromBuffer(pSend, functionToken);
+ NativeCodeFunctionData _pCodeInfo; // storage
+ NativeCodeFunctionData * pCodeInfo = &_pCodeInfo;
+ // pCodeInfo does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetNativeCodeInfo(vmDomainFile, functionToken, pCodeInfo); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pCodeInfo);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetNativeCodeInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetNativeCodeInfoForAddr(VMPTR_MethodDesc vmMethodDesc, CORDB_ADDRESS hotCodeStartAddr, NativeCodeFunctionData * pCodeInfo)
+void DDUnpack::Unpack_GetNativeCodeInfoForAddr(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_MethodDesc vmMethodDesc;
+ ReadFromBuffer(pSend, vmMethodDesc);
+ CORDB_ADDRESS hotCodeStartAddr;
+ ReadFromBuffer(pSend, hotCodeStartAddr);
+ NativeCodeFunctionData _pCodeInfo; // storage
+ NativeCodeFunctionData * pCodeInfo = &_pCodeInfo;
+ ReadFromBuffer(pSend, &_pCodeInfo); // serialize to storage
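+    // Note: unlike Unpack_GetNativeCodeInfo above, pCodeInfo is also unpacked from
+    // the request here (treated as in/out rather than output-only).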
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetNativeCodeInfoForAddr(vmMethodDesc, hotCodeStartAddr, pCodeInfo); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pCodeInfo);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetNativeCodeInfoForAddr
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetClassInfo(VMPTR_AppDomain vmAppDomain, VMPTR_Module vmModule, mdTypeDef metadataToken, VMPTR_TypeHandle thExact, VMPTR_TypeHandle thApprox, ClassInfo * pData)
+void DDUnpack::Unpack_GetClassInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ VMPTR_Module vmModule;
+ ReadFromBuffer(pSend, vmModule);
+ mdTypeDef metadataToken;
+ ReadFromBuffer(pSend, metadataToken);
+ VMPTR_TypeHandle thExact;
+ ReadFromBuffer(pSend, thExact);
+ VMPTR_TypeHandle thApprox;
+ ReadFromBuffer(pSend, thApprox);
+ ClassInfo _pData; // storage
+ ClassInfo * pData = &_pData;
+ ReadFromBuffer(pSend, &_pData); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetClassInfo(vmAppDomain, vmModule, metadataToken, thExact, thApprox, pData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pData);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetClassInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetInstantiationFieldInfo(VMPTR_DomainFile vmDomainFile, mdTypeDef metadataToken, VMPTR_TypeHandle vmThExact, VMPTR_TypeHandle vmThApprox, DacDbiArrayList<FieldData> * pFieldList, SIZE_T * pObjectSize)
+void DDUnpack::Unpack_GetInstantiationFieldInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_DomainFile vmDomainFile;
+ ReadFromBuffer(pSend, vmDomainFile);
+ mdTypeDef metadataToken;
+ ReadFromBuffer(pSend, metadataToken);
+ VMPTR_TypeHandle vmThExact;
+ ReadFromBuffer(pSend, vmThExact);
+ VMPTR_TypeHandle vmThApprox;
+ ReadFromBuffer(pSend, vmThApprox);
+ DacDbiArrayList<FieldData> _pFieldList; // storage
+ DacDbiArrayList<FieldData> * pFieldList = &_pFieldList;
+ // pFieldList does not need to be copied on input
+ SIZE_T _pObjectSize; // storage
+ SIZE_T * pObjectSize = &_pObjectSize;
+ // pObjectSize does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetInstantiationFieldInfo(vmDomainFile, metadataToken, vmThExact, vmThApprox, pFieldList, pObjectSize); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pFieldList);
+ WriteToBuffer(pResult, pObjectSize);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetInstantiationFieldInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle vmTypeHandle, DebuggerIPCE_ExpandedTypeData * pTypeInfo)
+void DDUnpack::Unpack_TypeHandleToExpandedTypeInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ AreValueTypesBoxed boxed;
+ ReadFromBuffer(pSend, boxed);
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ VMPTR_TypeHandle vmTypeHandle;
+ ReadFromBuffer(pSend, vmTypeHandle);
+ DebuggerIPCE_ExpandedTypeData _pTypeInfo; // storage
+ DebuggerIPCE_ExpandedTypeData * pTypeInfo = &_pTypeInfo;
+ ReadFromBuffer(pSend, &_pTypeInfo); // serialize to storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->TypeHandleToExpandedTypeInfo(boxed, vmAppDomain, vmTypeHandle, pTypeInfo); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pTypeInfo);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method TypeHandleToExpandedTypeInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetObjectExpandedTypeInfo(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, CORDB_ADDRESS addr, DebuggerIPCE_ExpandedTypeData * pTypeInfo)
+void DDUnpack::Unpack_GetObjectExpandedTypeInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ AreValueTypesBoxed boxed;
+ ReadFromBuffer(pSend, boxed);
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ CORDB_ADDRESS addr;
+ ReadFromBuffer(pSend, addr);
+ DebuggerIPCE_ExpandedTypeData _pTypeInfo; // storage
+ DebuggerIPCE_ExpandedTypeData * pTypeInfo = &_pTypeInfo;
+ // pTypeInfo does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetObjectExpandedTypeInfo(boxed, vmAppDomain, addr, pTypeInfo); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pTypeInfo);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetObjectExpandedTypeInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetObjectExpandedTypeInfoFromID(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, COR_TYPEID id, DebuggerIPCE_ExpandedTypeData * pTypeInfo)
+void DDUnpack::Unpack_GetObjectExpandedTypeInfoFromID(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ AreValueTypesBoxed boxed;
+ ReadFromBuffer(pSend, boxed);
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ COR_TYPEID id;
+ ReadFromBuffer(pSend, id);
+ DebuggerIPCE_ExpandedTypeData _pTypeInfo; // storage
+ DebuggerIPCE_ExpandedTypeData * pTypeInfo = &_pTypeInfo;
+ // pTypeInfo does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetObjectExpandedTypeInfoFromID(boxed, vmAppDomain, id, pTypeInfo); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pTypeInfo);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetObjectExpandedTypeInfoFromID
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_TypeHandle GetApproxTypeHandle(TypeInfoList * pTypeData)
+void DDUnpack::Unpack_GetApproxTypeHandle(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ TypeInfoList _pTypeData; // storage
+ TypeInfoList * pTypeData = &_pTypeData;
+ ReadFromBuffer(pSend, &_pTypeData); // deserialize into storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_TypeHandle _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetApproxTypeHandle(pTypeData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pTypeData);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetApproxTypeHandle
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT GetExactTypeHandle(DebuggerIPCE_ExpandedTypeData * pTypeData, ArgInfoList * pArgInfo, VMPTR_TypeHandle & vmTypeHandle)
+void DDUnpack::Unpack_GetExactTypeHandle(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ DebuggerIPCE_ExpandedTypeData _pTypeData; // storage
+ DebuggerIPCE_ExpandedTypeData * pTypeData = &_pTypeData;
+ ReadFromBuffer(pSend, &_pTypeData); // deserialize into storage
+ ArgInfoList _pArgInfo; // storage
+ ArgInfoList * pArgInfo = &_pArgInfo;
+ ReadFromBuffer(pSend, &_pArgInfo); // deserialize into storage
+ VMPTR_TypeHandle vmTypeHandle;
+ ReadFromBuffer(pSend, vmTypeHandle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetExactTypeHandle(pTypeData, pArgInfo, vmTypeHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pTypeData);
+ WriteToBuffer(pResult, pArgInfo);
+ WriteToBuffer(pResult, vmTypeHandle);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetExactTypeHandle
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetMethodDescParams(VMPTR_AppDomain vmAppDomain, VMPTR_MethodDesc vmMethodDesc, GENERICS_TYPE_TOKEN genericsToken, UINT32 * pcGenericClassTypeParams, TypeParamsList * pGenericTypeParams)
+void DDUnpack::Unpack_GetMethodDescParams(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ VMPTR_MethodDesc vmMethodDesc;
+ ReadFromBuffer(pSend, vmMethodDesc);
+ GENERICS_TYPE_TOKEN genericsToken;
+ ReadFromBuffer(pSend, genericsToken);
+ UINT32 _pcGenericClassTypeParams; // storage
+ UINT32 * pcGenericClassTypeParams = &_pcGenericClassTypeParams;
+ // pcGenericClassTypeParams does not need to be copied on input
+ TypeParamsList _pGenericTypeParams; // storage
+ TypeParamsList * pGenericTypeParams = &_pGenericTypeParams;
+ // pGenericTypeParams does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetMethodDescParams(vmAppDomain, vmMethodDesc, genericsToken, pcGenericClassTypeParams, pGenericTypeParams); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pcGenericClassTypeParams);
+ WriteToBuffer(pResult, pGenericTypeParams);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetMethodDescParams
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// CORDB_ADDRESS GetThreadOrContextStaticAddress(VMPTR_FieldDesc vmField, VMPTR_Thread vmRuntimeThread)
+void DDUnpack::Unpack_GetThreadOrContextStaticAddress(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_FieldDesc vmField;
+ ReadFromBuffer(pSend, vmField);
+ VMPTR_Thread vmRuntimeThread;
+ ReadFromBuffer(pSend, vmRuntimeThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ CORDB_ADDRESS _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetThreadOrContextStaticAddress(vmField, vmRuntimeThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetThreadOrContextStaticAddress
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// CORDB_ADDRESS GetCollectibleTypeStaticAddress(VMPTR_FieldDesc vmField, VMPTR_AppDomain vmAppDomain)
+void DDUnpack::Unpack_GetCollectibleTypeStaticAddress(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_FieldDesc vmField;
+ ReadFromBuffer(pSend, vmField);
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ CORDB_ADDRESS _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetCollectibleTypeStaticAddress(vmField, vmAppDomain); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetCollectibleTypeStaticAddress
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetEnCHangingFieldInfo(const EnCHangingFieldInfo * pEnCFieldInfo, FieldData * pFieldData, BOOL * pfStatic)
+void DDUnpack::Unpack_GetEnCHangingFieldInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ EnCHangingFieldInfo _pEnCFieldInfo; // storage
+ const EnCHangingFieldInfo * pEnCFieldInfo = &_pEnCFieldInfo;
+ ReadFromBuffer(pSend, &_pEnCFieldInfo); // deserialize into storage
+ FieldData _pFieldData; // storage
+ FieldData * pFieldData = &_pFieldData;
+ // pFieldData does not need to be copied on input
+ BOOL _pfStatic; // storage
+ BOOL * pfStatic = &_pfStatic;
+ // pfStatic does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetEnCHangingFieldInfo(pEnCFieldInfo, pFieldData, pfStatic); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pFieldData);
+ WriteToBuffer(pResult, pfStatic);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetEnCHangingFieldInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetTypeHandleParams(VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle vmTypeHandle, TypeParamsList * pParams)
+void DDUnpack::Unpack_GetTypeHandleParams(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ VMPTR_TypeHandle vmTypeHandle;
+ ReadFromBuffer(pSend, vmTypeHandle);
+ TypeParamsList _pParams; // storage
+ TypeParamsList * pParams = &_pParams;
+ // pParams does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetTypeHandleParams(vmAppDomain, vmTypeHandle, pParams); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pParams);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetTypeHandleParams
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetSimpleType(VMPTR_AppDomain vmAppDomain, CorElementType simpleType, mdTypeDef * pMetadataToken, VMPTR_Module * pVmModule, VMPTR_DomainFile * pVmDomainFile)
+void DDUnpack::Unpack_GetSimpleType(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ CorElementType simpleType;
+ ReadFromBuffer(pSend, simpleType);
+ mdTypeDef _pMetadataToken; // storage
+ mdTypeDef * pMetadataToken = &_pMetadataToken;
+ // pMetadataToken does not need to be copied on input
+ VMPTR_Module _pVmModule; // storage
+ VMPTR_Module * pVmModule = &_pVmModule;
+ // pVmModule does not need to be copied on input
+ VMPTR_DomainFile _pVmDomainFile; // storage
+ VMPTR_DomainFile * pVmDomainFile = &_pVmDomainFile;
+ // pVmDomainFile does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetSimpleType(vmAppDomain, simpleType, pMetadataToken, pVmModule, pVmDomainFile); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pMetadataToken);
+ WriteToBuffer(pResult, pVmModule);
+ WriteToBuffer(pResult, pVmDomainFile);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetSimpleType
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsExceptionObject(VMPTR_Object vmObject)
+void DDUnpack::Unpack_IsExceptionObject(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Object vmObject;
+ ReadFromBuffer(pSend, vmObject);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsExceptionObject(vmObject); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsExceptionObject
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetStackFramesFromException(VMPTR_Object vmObject, DacDbiArrayList<DacExceptionCallStackData> & dacStackFrames)
+void DDUnpack::Unpack_GetStackFramesFromException(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Object vmObject;
+ ReadFromBuffer(pSend, vmObject);
+ DacDbiArrayList<DacExceptionCallStackData> dacStackFrames;
+ ReadFromBuffer(pSend, dacStackFrames);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetStackFramesFromException(vmObject, dacStackFrames); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, dacStackFrames);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetStackFramesFromException
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsRcw(VMPTR_Object vmObject)
+void DDUnpack::Unpack_IsRcw(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Object vmObject;
+ ReadFromBuffer(pSend, vmObject);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsRcw(vmObject); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsRcw
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetRcwCachedInterfaceTypes(VMPTR_Object vmObject, VMPTR_AppDomain vmAppDomain, BOOL bIInspectableOnly, DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pDacInterfaces)
+void DDUnpack::Unpack_GetRcwCachedInterfaceTypes(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Object vmObject;
+ ReadFromBuffer(pSend, vmObject);
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ BOOL bIInspectableOnly;
+ ReadFromBuffer(pSend, bIInspectableOnly);
+ DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> _pDacInterfaces; // storage
+ DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pDacInterfaces = &_pDacInterfaces;
+ // pDacInterfaces does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetRcwCachedInterfaceTypes(vmObject, vmAppDomain, bIInspectableOnly, pDacInterfaces); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pDacInterfaces);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetRcwCachedInterfaceTypes
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetRcwCachedInterfacePointers(VMPTR_Object vmObject, BOOL bIInspectableOnly, DacDbiArrayList<CORDB_ADDRESS> * pDacItfPtrs)
+void DDUnpack::Unpack_GetRcwCachedInterfacePointers(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Object vmObject;
+ ReadFromBuffer(pSend, vmObject);
+ BOOL bIInspectableOnly;
+ ReadFromBuffer(pSend, bIInspectableOnly);
+ DacDbiArrayList<CORDB_ADDRESS> _pDacItfPtrs; // storage
+ DacDbiArrayList<CORDB_ADDRESS> * pDacItfPtrs = &_pDacItfPtrs;
+ // pDacItfPtrs does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetRcwCachedInterfacePointers(vmObject, bIInspectableOnly, pDacItfPtrs); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pDacItfPtrs);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetRcwCachedInterfacePointers
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetCachedWinRTTypesForIIDs(VMPTR_AppDomain vmAppDomain, DacDbiArrayList<GUID> & iids, DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes)
+void DDUnpack::Unpack_GetCachedWinRTTypesForIIDs(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ DacDbiArrayList<GUID> iids;
+ ReadFromBuffer(pSend, iids);
+ DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> _pTypes; // storage
+ DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes = &_pTypes;
+ // pTypes does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetCachedWinRTTypesForIIDs(vmAppDomain, iids, pTypes); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, iids);
+ WriteToBuffer(pResult, pTypes);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetCachedWinRTTypesForIIDs
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetCachedWinRTTypes(VMPTR_AppDomain vmAppDomain, DacDbiArrayList<GUID> * piids, DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes)
+void DDUnpack::Unpack_GetCachedWinRTTypes(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ DacDbiArrayList<GUID> _piids; // storage
+ DacDbiArrayList<GUID> * piids = &_piids;
+ // piids does not need to be copied on input
+ DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> _pTypes; // storage
+ DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes = &_pTypes;
+ // pTypes does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetCachedWinRTTypes(vmAppDomain, piids, pTypes); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, piids);
+ WriteToBuffer(pResult, pTypes);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetCachedWinRTTypes
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetTypedByRefInfo(CORDB_ADDRESS pTypedByRef, VMPTR_AppDomain vmAppDomain, DebuggerIPCE_ObjectData * pObjectData)
+void DDUnpack::Unpack_GetTypedByRefInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS pTypedByRef;
+ ReadFromBuffer(pSend, pTypedByRef);
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ DebuggerIPCE_ObjectData _pObjectData; // storage
+ DebuggerIPCE_ObjectData * pObjectData = &_pObjectData;
+ ReadFromBuffer(pSend, &_pObjectData); // deserialize into storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetTypedByRefInfo(pTypedByRef, vmAppDomain, pObjectData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pObjectData);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetTypedByRefInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetStringData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData)
+void DDUnpack::Unpack_GetStringData(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS objectAddress;
+ ReadFromBuffer(pSend, objectAddress);
+ DebuggerIPCE_ObjectData _pObjectData; // storage
+ DebuggerIPCE_ObjectData * pObjectData = &_pObjectData;
+ ReadFromBuffer(pSend, &_pObjectData); // deserialize into storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetStringData(objectAddress, pObjectData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pObjectData);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetStringData
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetArrayData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData)
+void DDUnpack::Unpack_GetArrayData(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS objectAddress;
+ ReadFromBuffer(pSend, objectAddress);
+ DebuggerIPCE_ObjectData _pObjectData; // storage
+ DebuggerIPCE_ObjectData * pObjectData = &_pObjectData;
+ ReadFromBuffer(pSend, &_pObjectData); // deserialize into storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetArrayData(objectAddress, pObjectData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pObjectData);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetArrayData
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetBasicObjectInfo(CORDB_ADDRESS objectAddress, CorElementType type, VMPTR_AppDomain vmAppDomain, DebuggerIPCE_ObjectData * pObjectData)
+void DDUnpack::Unpack_GetBasicObjectInfo(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS objectAddress;
+ ReadFromBuffer(pSend, objectAddress);
+ CorElementType type;
+ ReadFromBuffer(pSend, type);
+ VMPTR_AppDomain vmAppDomain;
+ ReadFromBuffer(pSend, vmAppDomain);
+ DebuggerIPCE_ObjectData _pObjectData; // storage
+ DebuggerIPCE_ObjectData * pObjectData = &_pObjectData;
+ ReadFromBuffer(pSend, &_pObjectData); // deserialize into storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetBasicObjectInfo(objectAddress, type, vmAppDomain, pObjectData); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pObjectData);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetBasicObjectInfo
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void TestCrst(VMPTR_Crst vmCrst)
+void DDUnpack::Unpack_TestCrst(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Crst vmCrst;
+ ReadFromBuffer(pSend, vmCrst);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->TestCrst(vmCrst); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method TestCrst
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void TestRWLock(VMPTR_SimpleRWLock vmRWLock)
+void DDUnpack::Unpack_TestRWLock(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_SimpleRWLock vmRWLock;
+ ReadFromBuffer(pSend, vmRWLock);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->TestRWLock(vmRWLock); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method TestRWLock
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// CORDB_ADDRESS GetDebuggerControlBlockAddress()
+void DDUnpack::Unpack_GetDebuggerControlBlockAddress(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ CORDB_ADDRESS _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetDebuggerControlBlockAddress(); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetDebuggerControlBlockAddress
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_Object GetObjectFromRefPtr(CORDB_ADDRESS ptr)
+void DDUnpack::Unpack_GetObjectFromRefPtr(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS ptr;
+ ReadFromBuffer(pSend, ptr);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_Object _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetObjectFromRefPtr(ptr); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetObjectFromRefPtr
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_Object GetObject(CORDB_ADDRESS ptr)
+void DDUnpack::Unpack_GetObject(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS ptr;
+ ReadFromBuffer(pSend, ptr);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_Object _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetObject(ptr); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetObject
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT EnableNGENPolicy(CorDebugNGENPolicy ePolicy)
+void DDUnpack::Unpack_EnableNGENPolicy(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CorDebugNGENPolicy ePolicy;
+ ReadFromBuffer(pSend, ePolicy);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->EnableNGENPolicy(ePolicy); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnableNGENPolicy
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// VMPTR_OBJECTHANDLE GetVmObjectHandle(CORDB_ADDRESS handleAddress)
+void DDUnpack::Unpack_GetVmObjectHandle(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS handleAddress;
+ ReadFromBuffer(pSend, handleAddress);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ VMPTR_OBJECTHANDLE _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetVmObjectHandle(handleAddress); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetVmObjectHandle
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// BOOL IsVmObjectHandleValid(VMPTR_OBJECTHANDLE vmHandle)
+void DDUnpack::Unpack_IsVmObjectHandleValid(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_OBJECTHANDLE vmHandle;
+ ReadFromBuffer(pSend, vmHandle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ BOOL _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsVmObjectHandleValid(vmHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsVmObjectHandleValid
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT IsWinRTModule(VMPTR_Module vmModule, BOOL & isWinRT)
+void DDUnpack::Unpack_IsWinRTModule(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Module vmModule;
+ ReadFromBuffer(pSend, vmModule);
+ BOOL isWinRT;
+ ReadFromBuffer(pSend, isWinRT);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsWinRTModule(vmModule, isWinRT); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, isWinRT);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsWinRTModule
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// ULONG GetAppDomainIdFromVmObjectHandle(VMPTR_OBJECTHANDLE vmHandle)
+void DDUnpack::Unpack_GetAppDomainIdFromVmObjectHandle(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_OBJECTHANDLE vmHandle;
+ ReadFromBuffer(pSend, vmHandle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ ULONG _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetAppDomainIdFromVmObjectHandle(vmHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAppDomainIdFromVmObjectHandle
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// CORDB_ADDRESS GetHandleAddressFromVmHandle(VMPTR_OBJECTHANDLE vmHandle)
+void DDUnpack::Unpack_GetHandleAddressFromVmHandle(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_OBJECTHANDLE vmHandle;
+ ReadFromBuffer(pSend, vmHandle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ CORDB_ADDRESS _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetHandleAddressFromVmHandle(vmHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetHandleAddressFromVmHandle
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// TargetBuffer GetObjectContents(VMPTR_Object obj)
+void DDUnpack::Unpack_GetObjectContents(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Object obj;
+ ReadFromBuffer(pSend, obj);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ TargetBuffer _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetObjectContents(obj); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetObjectContents
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void EnumerateBlockingObjects(VMPTR_Thread vmThread, IDacDbiInterface::FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
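+//
+// Note: the fpCallback / pUserData parameters are not marshalled across the
+// buffer. Instead, this stub passes GeneralEnumerationCallback with pResult as
+// the callback data, so enumerated items can be written into the reply buffer
+// as they are produced; the sentinel DWORD of 2 (Stop) written after the call
+// marks the end of the streamed list.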
+void DDUnpack::Unpack_EnumerateBlockingObjects(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // Extra stuff for callback handlers
+ m_pReal->EnumerateBlockingObjects(vmThread, GeneralEnumerationCallback, pResult);
+
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // Sentinel for callback list
+ DWORD dw = 2; // Stop
+ WriteToBuffer(pResult, dw);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnumerateBlockingObjects
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// MonitorLockInfo GetThreadOwningMonitorLock(VMPTR_Object vmObject)
+void DDUnpack::Unpack_GetThreadOwningMonitorLock(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Object vmObject;
+ ReadFromBuffer(pSend, vmObject);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ MonitorLockInfo _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetThreadOwningMonitorLock(vmObject); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetThreadOwningMonitorLock
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void EnumerateMonitorEventWaitList(VMPTR_Object vmObject, IDacDbiInterface::FP_THREAD_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+void DDUnpack::Unpack_EnumerateMonitorEventWaitList(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Object vmObject;
+ ReadFromBuffer(pSend, vmObject);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // Extra stuff for callback handlers
+ m_pReal->EnumerateMonitorEventWaitList(vmObject, GeneralEnumerationCallback, pResult);
+
+ }
+ EX_CATCH_HRESULT(hr);
+
+ // Sentinel for callback list
+ DWORD dw = 2; // Stop
+ WriteToBuffer(pResult, dw);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method EnumerateMonitorEventWaitList
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// CLR_DEBUGGING_PROCESS_FLAGS GetAttachStateFlags()
+void DDUnpack::Unpack_GetAttachStateFlags(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ CLR_DEBUGGING_PROCESS_FLAGS _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetAttachStateFlags(); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAttachStateFlags
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// bool GetMetaDataFileInfoFromPEFile(VMPTR_PEFile vmPEFile, DWORD & dwTimeStamp, DWORD & dwImageSize, bool & isNGEN, IStringHolder * pStrFilename)
+void DDUnpack::Unpack_GetMetaDataFileInfoFromPEFile(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_PEFile vmPEFile;
+ ReadFromBuffer(pSend, vmPEFile);
+ DWORD dwTimeStamp;
+ ReadFromBuffer(pSend, dwTimeStamp);
+ DWORD dwImageSize;
+ ReadFromBuffer(pSend, dwImageSize);
+ bool isNGEN;
+ ReadFromBuffer(pSend, isNGEN);
+ StringCopyHolder _pStrFilename; // storage
+ StringCopyHolder * pStrFilename = &_pStrFilename;
+ // pStrFilename does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ bool _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetMetaDataFileInfoFromPEFile(vmPEFile, dwTimeStamp, dwImageSize, isNGEN, pStrFilename); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, dwTimeStamp);
+ WriteToBuffer(pResult, dwImageSize);
+ WriteToBuffer(pResult, isNGEN);
+ WriteToBuffer(pResult, pStrFilename);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetMetaDataFileInfoFromPEFile
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// bool GetILImageInfoFromNgenPEFile(VMPTR_PEFile vmPEFile, DWORD & dwTimeStamp, DWORD & dwSize, IStringHolder * pStrFilename)
+void DDUnpack::Unpack_GetILImageInfoFromNgenPEFile(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_PEFile vmPEFile;
+ ReadFromBuffer(pSend, vmPEFile);
+ DWORD dwTimeStamp;
+ ReadFromBuffer(pSend, dwTimeStamp);
+ DWORD dwSize;
+ ReadFromBuffer(pSend, dwSize);
+ StringCopyHolder _pStrFilename; // storage
+ StringCopyHolder * pStrFilename = &_pStrFilename;
+ // pStrFilename does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ bool _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetILImageInfoFromNgenPEFile(vmPEFile, dwTimeStamp, dwSize, pStrFilename); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, dwTimeStamp);
+ WriteToBuffer(pResult, dwSize);
+ WriteToBuffer(pResult, pStrFilename);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetILImageInfoFromNgenPEFile
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// bool IsThreadSuspendedOrHijacked(VMPTR_Thread vmThread)
+void DDUnpack::Unpack_IsThreadSuspendedOrHijacked(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ VMPTR_Thread vmThread;
+ ReadFromBuffer(pSend, vmThread);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ bool _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsThreadSuspendedOrHijacked(vmThread); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsThreadSuspendedOrHijacked
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// bool AreGCStructuresValid()
+void DDUnpack::Unpack_AreGCStructuresValid(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ bool _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->AreGCStructuresValid(); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method AreGCStructuresValid
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT CreateHeapWalk(HeapWalkHandle * pHandle)
+void DDUnpack::Unpack_CreateHeapWalk(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ HeapWalkHandle _pHandle; // storage
+ HeapWalkHandle * pHandle = &_pHandle;
+ // pHandle does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->CreateHeapWalk(pHandle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pHandle);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method CreateHeapWalk
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void DeleteHeapWalk(HeapWalkHandle handle)
+void DDUnpack::Unpack_DeleteHeapWalk(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ HeapWalkHandle handle;
+ ReadFromBuffer(pSend, handle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->DeleteHeapWalk(handle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method DeleteHeapWalk
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT WalkHeap(HeapWalkHandle handle, ULONG count, COR_HEAPOBJECT * objects, ULONG * pFetched)
+void DDUnpack::Unpack_WalkHeap(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ HeapWalkHandle handle;
+ ReadFromBuffer(pSend, handle);
+ ULONG count;
+ ReadFromBuffer(pSend, count);
+ COR_HEAPOBJECT _objects; // storage
+ COR_HEAPOBJECT * objects = &_objects;
+ // objects does not need to be copied on input
+ ULONG _pFetched; // storage
+ ULONG * pFetched = &_pFetched;
+ // pFetched does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->WalkHeap(handle, count, objects, pFetched); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, objects);
+ WriteToBuffer(pResult, pFetched);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method WalkHeap
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT GetHeapSegments(DacDbiArrayList<COR_SEGMENT> * pSegments)
+void DDUnpack::Unpack_GetHeapSegments(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ DacDbiArrayList<COR_SEGMENT> _pSegments; // storage
+ DacDbiArrayList<COR_SEGMENT> * pSegments = &_pSegments;
+ // pSegments does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetHeapSegments(pSegments); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pSegments);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetHeapSegments
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// bool IsValidObject(CORDB_ADDRESS obj)
+void DDUnpack::Unpack_IsValidObject(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS obj;
+ ReadFromBuffer(pSend, obj);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ bool _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->IsValidObject(obj); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method IsValidObject
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// bool GetAppDomainForObject(CORDB_ADDRESS obj, VMPTR_AppDomain * pApp, VMPTR_Module * pModule, VMPTR_DomainFile * pDomainFile)
+void DDUnpack::Unpack_GetAppDomainForObject(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS obj;
+ ReadFromBuffer(pSend, obj);
+ VMPTR_AppDomain _pApp; // storage
+ VMPTR_AppDomain * pApp = &_pApp;
+ // pApp does not need to be copied on input
+ VMPTR_Module _pModule; // storage
+ VMPTR_Module * pModule = &_pModule;
+ // pModule does not need to be copied on input
+ VMPTR_DomainFile _pDomainFile; // storage
+ VMPTR_DomainFile * pDomainFile = &_pDomainFile;
+ // pDomainFile does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ bool _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetAppDomainForObject(obj, pApp, pModule, pDomainFile); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pApp);
+ WriteToBuffer(pResult, pModule);
+ WriteToBuffer(pResult, pDomainFile);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetAppDomainForObject
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT CreateRefWalk(RefWalkHandle * pHandle, BOOL walkStacks, BOOL walkFQ, UINT32 handleWalkMask)
+void DDUnpack::Unpack_CreateRefWalk(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ RefWalkHandle _pHandle; // storage
+ RefWalkHandle * pHandle = &_pHandle;
+ // pHandle does not need to be copied on input
+ BOOL walkStacks;
+ ReadFromBuffer(pSend, walkStacks);
+ BOOL walkFQ;
+ ReadFromBuffer(pSend, walkFQ);
+ UINT32 handleWalkMask;
+ ReadFromBuffer(pSend, handleWalkMask);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->CreateRefWalk(pHandle, walkStacks, walkFQ, handleWalkMask); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pHandle);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method CreateRefWalk
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void DeleteRefWalk(RefWalkHandle handle)
+void DDUnpack::Unpack_DeleteRefWalk(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ RefWalkHandle handle;
+ ReadFromBuffer(pSend, handle);
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->DeleteRefWalk(handle); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method DeleteRefWalk
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT WalkRefs(RefWalkHandle handle, ULONG count, DacGcReference * refs, ULONG * pFetched)
+void DDUnpack::Unpack_WalkRefs(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ RefWalkHandle handle;
+ ReadFromBuffer(pSend, handle);
+ ULONG count;
+ ReadFromBuffer(pSend, count);
+ DacGcReference _refs; // storage
+ DacGcReference * refs = &_refs;
+ // refs does not need to be copied on input
+ ULONG _pFetched; // storage
+ ULONG * pFetched = &_pFetched;
+ // pFetched does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->WalkRefs(handle, count, refs, pFetched); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, refs);
+ WriteToBuffer(pResult, pFetched);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method WalkRefs
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT GetTypeID(CORDB_ADDRESS obj, COR_TYPEID * pType)
+void DDUnpack::Unpack_GetTypeID(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ CORDB_ADDRESS obj;
+ ReadFromBuffer(pSend, obj);
+ COR_TYPEID _pType; // storage
+ COR_TYPEID * pType = &_pType;
+ ReadFromBuffer(pSend, &_pType); // deserialize into storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetTypeID(obj, pType); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pType);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetTypeID
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT GetObjectFields(COR_TYPEID id, ULONG32 celt, COR_FIELD * layout, ULONG32 * pceltFetched)
+void DDUnpack::Unpack_GetObjectFields(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ COR_TYPEID id;
+ ReadFromBuffer(pSend, id);
+ ULONG32 celt;
+ ReadFromBuffer(pSend, celt);
+ COR_FIELD _layout; // storage
+ COR_FIELD * layout = &_layout;
+ // layout does not need to be copied on input
+ ULONG32 _pceltFetched; // storage
+ ULONG32 * pceltFetched = &_pceltFetched;
+ // pceltFetched does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetObjectFields(id, celt, layout, pceltFetched); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, layout);
+ WriteToBuffer(pResult, pceltFetched);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetObjectFields
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT GetTypeLayout(COR_TYPEID id, COR_TYPE_LAYOUT * pLayout)
+void DDUnpack::Unpack_GetTypeLayout(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ COR_TYPEID id;
+ ReadFromBuffer(pSend, id);
+ COR_TYPE_LAYOUT _pLayout; // storage
+ COR_TYPE_LAYOUT * pLayout = &_pLayout;
+ ReadFromBuffer(pSend, &_pLayout); // deserialize into storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetTypeLayout(id, pLayout); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pLayout);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetTypeLayout
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// HRESULT GetArrayLayout(COR_TYPEID id, COR_ARRAY_LAYOUT * pLayout)
+void DDUnpack::Unpack_GetArrayLayout(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ COR_TYPEID id;
+ ReadFromBuffer(pSend, id);
+ COR_ARRAY_LAYOUT _pLayout; // storage
+ COR_ARRAY_LAYOUT * pLayout = &_pLayout;
+ ReadFromBuffer(pSend, &_pLayout); // deserialize into storage
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+ HRESULT _retValue; // return result
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ _retValue = m_pReal->GetArrayLayout(id, pLayout); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pLayout);
+ WriteToBuffer(pResult, hr); // exception result
+ WriteToBuffer(pResult, _retValue); // copy back return result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetArrayLayout
+
+//---------------------------------------------------------------------
+// Unpacking stub for:
+// void GetGCHeapInformation(COR_HEAPINFO * pHeapInfo)
+void DDUnpack::Unpack_GetGCHeapInformation(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+
+ // Parameters
+ COR_HEAPINFO _pHeapInfo; // storage
+ COR_HEAPINFO * pHeapInfo = &_pHeapInfo;
+ // pHeapInfo does not need to be copied on input
+ _ASSERTE(pSend->IsAtEnd()); // ensure entire buffer is read
+
+ //
+ // Make the actual call
+ //
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ m_pReal->GetGCHeapInformation(pHeapInfo); // actual call
+
+ }
+ EX_CATCH_HRESULT(hr);
+ // Marshal parameters back
+ WriteToBuffer(pResult, pHeapInfo);
+ WriteToBuffer(pResult, hr); // exception result
+ // Dtors for any DacDbi structures that we marshalled get run here.
+} // end method GetGCHeapInformation
+
+
+//
+// Handler
+//
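+// HandleDDMessage is the dispatch entry point for this unpacking layer: it
+// reads the DD_MessageId from the request buffer and routes the call to the
+// matching Unpack_* stub above. Each stub deserializes its arguments, invokes
+// the real IDacDbiInterface implementation (m_pReal) inside EX_TRY, and writes
+// any out-parameters, the exception HRESULT, and the return value back into
+// the reply buffer.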
+void DDUnpack::HandleDDMessage(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+ DD_MessageId id;
+ ReadFromBuffer(pSend, id);
+
+ switch(id)
+ {
+
+ case DDID_CheckDbiVersion:
+ Unpack_CheckDbiVersion(pSend, pResult);
+ break;
+
+ case DDID_GetLocalInterfaceHashAndTimestamp:
+ Unpack_GetLocalInterfaceHashAndTimestamp(pSend, pResult);
+ break;
+
+ case DDID_GetRemoteInterfaceHashAndTimestamp:
+ Unpack_GetRemoteInterfaceHashAndTimestamp(pSend, pResult);
+ break;
+
+ case DDID_FlushCache:
+ Unpack_FlushCache(pSend, pResult);
+ break;
+
+ case DDID_DacSetTargetConsistencyChecks:
+ Unpack_DacSetTargetConsistencyChecks(pSend, pResult);
+ break;
+
+ case DDID_Destroy:
+ Unpack_Destroy(pSend, pResult);
+ break;
+
+ case DDID_IsLeftSideInitialized:
+ Unpack_IsLeftSideInitialized(pSend, pResult);
+ break;
+
+ case DDID_GetAppDomainFromId:
+ Unpack_GetAppDomainFromId(pSend, pResult);
+ break;
+
+ case DDID_GetAppDomainId:
+ Unpack_GetAppDomainId(pSend, pResult);
+ break;
+
+ case DDID_GetAppDomainObject:
+ Unpack_GetAppDomainObject(pSend, pResult);
+ break;
+
+ case DDID_IsDefaultDomain:
+ Unpack_IsDefaultDomain(pSend, pResult);
+ break;
+
+ case DDID_GetAssemblyFromDomainAssembly:
+ Unpack_GetAssemblyFromDomainAssembly(pSend, pResult);
+ break;
+
+ case DDID_IsAssemblyFullyTrusted:
+ Unpack_IsAssemblyFullyTrusted(pSend, pResult);
+ break;
+
+ case DDID_GetAppDomainFullName:
+ Unpack_GetAppDomainFullName(pSend, pResult);
+ break;
+
+ case DDID_GetModuleSimpleName:
+ Unpack_GetModuleSimpleName(pSend, pResult);
+ break;
+
+ case DDID_GetAssemblyPath:
+ Unpack_GetAssemblyPath(pSend, pResult);
+ break;
+
+ case DDID_ResolveTypeReference:
+ Unpack_ResolveTypeReference(pSend, pResult);
+ break;
+
+ case DDID_GetModulePath:
+ Unpack_GetModulePath(pSend, pResult);
+ break;
+
+ case DDID_GetModuleNGenPath:
+ Unpack_GetModuleNGenPath(pSend, pResult);
+ break;
+
+ case DDID_GetMetadata:
+ Unpack_GetMetadata(pSend, pResult);
+ break;
+
+ case DDID_GetSymbolsBuffer:
+ Unpack_GetSymbolsBuffer(pSend, pResult);
+ break;
+
+ case DDID_GetModuleData:
+ Unpack_GetModuleData(pSend, pResult);
+ break;
+
+ case DDID_GetDomainFileData:
+ Unpack_GetDomainFileData(pSend, pResult);
+ break;
+
+ case DDID_GetModuleForDomainFile:
+ Unpack_GetModuleForDomainFile(pSend, pResult);
+ break;
+
+ case DDID_GetAddressType:
+ Unpack_GetAddressType(pSend, pResult);
+ break;
+
+ case DDID_IsTransitionStub:
+ Unpack_IsTransitionStub(pSend, pResult);
+ break;
+
+ case DDID_GetCompilerFlags:
+ Unpack_GetCompilerFlags(pSend, pResult);
+ break;
+
+ case DDID_SetCompilerFlags:
+ Unpack_SetCompilerFlags(pSend, pResult);
+ break;
+
+ case DDID_EnumerateAppDomains:
+ Unpack_EnumerateAppDomains(pSend, pResult);
+ break;
+
+ case DDID_EnumerateAssembliesInAppDomain:
+ Unpack_EnumerateAssembliesInAppDomain(pSend, pResult);
+ break;
+
+ case DDID_EnumerateModulesInAssembly:
+ Unpack_EnumerateModulesInAssembly(pSend, pResult);
+ break;
+
+ case DDID_RequestSyncAtEvent:
+ Unpack_RequestSyncAtEvent(pSend, pResult);
+ break;
+
+ case DDID_MarkDebuggerAttachPending:
+ Unpack_MarkDebuggerAttachPending(pSend, pResult);
+ break;
+
+ case DDID_MarkDebuggerAttached:
+ Unpack_MarkDebuggerAttached(pSend, pResult);
+ break;
+
+ case DDID_Hijack:
+ Unpack_Hijack(pSend, pResult);
+ break;
+
+ case DDID_EnumerateConnections:
+ Unpack_EnumerateConnections(pSend, pResult);
+ break;
+
+ case DDID_EnumerateThreads:
+ Unpack_EnumerateThreads(pSend, pResult);
+ break;
+
+ case DDID_IsThreadMarkedDead:
+ Unpack_IsThreadMarkedDead(pSend, pResult);
+ break;
+
+ case DDID_GetThreadHandle:
+ Unpack_GetThreadHandle(pSend, pResult);
+ break;
+
+ case DDID_GetThreadObject:
+ Unpack_GetThreadObject(pSend, pResult);
+ break;
+
+ case DDID_SetDebugState:
+ Unpack_SetDebugState(pSend, pResult);
+ break;
+
+ case DDID_HasUnhandledException:
+ Unpack_HasUnhandledException(pSend, pResult);
+ break;
+
+ case DDID_GetUserState:
+ Unpack_GetUserState(pSend, pResult);
+ break;
+
+ case DDID_GetConnectionID:
+ Unpack_GetConnectionID(pSend, pResult);
+ break;
+
+ case DDID_GetTaskID:
+ Unpack_GetTaskID(pSend, pResult);
+ break;
+
+ case DDID_TryGetVolatileOSThreadID:
+ Unpack_TryGetVolatileOSThreadID(pSend, pResult);
+ break;
+
+ case DDID_GetUniqueThreadID:
+ Unpack_GetUniqueThreadID(pSend, pResult);
+ break;
+
+ case DDID_GetCurrentException:
+ Unpack_GetCurrentException(pSend, pResult);
+ break;
+
+ case DDID_GetCurrentCustomDebuggerNotification:
+ Unpack_GetCurrentCustomDebuggerNotification(pSend, pResult);
+ break;
+
+ case DDID_GetCurrentAppDomain:
+ Unpack_GetCurrentAppDomain(pSend, pResult);
+ break;
+
+ case DDID_ResolveAssembly:
+ Unpack_ResolveAssembly(pSend, pResult);
+ break;
+
+ case DDID_GetNativeCodeSequencePointsAndVarInfo:
+ Unpack_GetNativeCodeSequencePointsAndVarInfo(pSend, pResult);
+ break;
+
+ case DDID_GetManagedStoppedContext:
+ Unpack_GetManagedStoppedContext(pSend, pResult);
+ break;
+
+ case DDID_CreateStackWalk:
+ Unpack_CreateStackWalk(pSend, pResult);
+ break;
+
+ case DDID_DeleteStackWalk:
+ Unpack_DeleteStackWalk(pSend, pResult);
+ break;
+
+ case DDID_GetStackWalkCurrentContext:
+ Unpack_GetStackWalkCurrentContext(pSend, pResult);
+ break;
+
+ case DDID_SetStackWalkCurrentContext:
+ Unpack_SetStackWalkCurrentContext(pSend, pResult);
+ break;
+
+ case DDID_UnwindStackWalkFrame:
+ Unpack_UnwindStackWalkFrame(pSend, pResult);
+ break;
+
+ case DDID_CheckContext:
+ Unpack_CheckContext(pSend, pResult);
+ break;
+
+ case DDID_GetStackWalkCurrentFrameInfo:
+ Unpack_GetStackWalkCurrentFrameInfo(pSend, pResult);
+ break;
+
+ case DDID_GetCountOfInternalFrames:
+ Unpack_GetCountOfInternalFrames(pSend, pResult);
+ break;
+
+ case DDID_EnumerateInternalFrames:
+ Unpack_EnumerateInternalFrames(pSend, pResult);
+ break;
+
+ case DDID_IsMatchingParentFrame:
+ Unpack_IsMatchingParentFrame(pSend, pResult);
+ break;
+
+ case DDID_GetStackParameterSize:
+ Unpack_GetStackParameterSize(pSend, pResult);
+ break;
+
+ case DDID_GetFramePointer:
+ Unpack_GetFramePointer(pSend, pResult);
+ break;
+
+ case DDID_IsLeafFrame:
+ Unpack_IsLeafFrame(pSend, pResult);
+ break;
+
+ case DDID_GetContext:
+ Unpack_GetContext(pSend, pResult);
+ break;
+
+ case DDID_ConvertContextToDebuggerRegDisplay:
+ Unpack_ConvertContextToDebuggerRegDisplay(pSend, pResult);
+ break;
+
+ case DDID_IsILStubOrLCGMethod:
+ Unpack_IsILStubOrLCGMethod(pSend, pResult);
+ break;
+
+ case DDID_GetVarArgSig:
+ Unpack_GetVarArgSig(pSend, pResult);
+ break;
+
+ case DDID_RequiresAlign8:
+ Unpack_RequiresAlign8(pSend, pResult);
+ break;
+
+ case DDID_ResolveExactGenericArgsToken:
+ Unpack_ResolveExactGenericArgsToken(pSend, pResult);
+ break;
+
+ case DDID_GetILCodeAndSig:
+ Unpack_GetILCodeAndSig(pSend, pResult);
+ break;
+
+ case DDID_GetNativeCodeInfo:
+ Unpack_GetNativeCodeInfo(pSend, pResult);
+ break;
+
+ case DDID_GetNativeCodeInfoForAddr:
+ Unpack_GetNativeCodeInfoForAddr(pSend, pResult);
+ break;
+
+ case DDID_GetClassInfo:
+ Unpack_GetClassInfo(pSend, pResult);
+ break;
+
+ case DDID_GetInstantiationFieldInfo:
+ Unpack_GetInstantiationFieldInfo(pSend, pResult);
+ break;
+
+ case DDID_TypeHandleToExpandedTypeInfo:
+ Unpack_TypeHandleToExpandedTypeInfo(pSend, pResult);
+ break;
+
+ case DDID_GetObjectExpandedTypeInfo:
+ Unpack_GetObjectExpandedTypeInfo(pSend, pResult);
+ break;
+
+ case DDID_GetObjectExpandedTypeInfoFromID:
+ Unpack_GetObjectExpandedTypeInfoFromID(pSend, pResult);
+ break;
+
+ case DDID_GetApproxTypeHandle:
+ Unpack_GetApproxTypeHandle(pSend, pResult);
+ break;
+
+ case DDID_GetExactTypeHandle:
+ Unpack_GetExactTypeHandle(pSend, pResult);
+ break;
+
+ case DDID_GetMethodDescParams:
+ Unpack_GetMethodDescParams(pSend, pResult);
+ break;
+
+ case DDID_GetThreadOrContextStaticAddress:
+ Unpack_GetThreadOrContextStaticAddress(pSend, pResult);
+ break;
+
+ case DDID_GetCollectibleTypeStaticAddress:
+ Unpack_GetCollectibleTypeStaticAddress(pSend, pResult);
+ break;
+
+ case DDID_GetEnCHangingFieldInfo:
+ Unpack_GetEnCHangingFieldInfo(pSend, pResult);
+ break;
+
+ case DDID_GetTypeHandleParams:
+ Unpack_GetTypeHandleParams(pSend, pResult);
+ break;
+
+ case DDID_GetSimpleType:
+ Unpack_GetSimpleType(pSend, pResult);
+ break;
+
+ case DDID_IsExceptionObject:
+ Unpack_IsExceptionObject(pSend, pResult);
+ break;
+
+ case DDID_GetStackFramesFromException:
+ Unpack_GetStackFramesFromException(pSend, pResult);
+ break;
+
+ case DDID_IsRcw:
+ Unpack_IsRcw(pSend, pResult);
+ break;
+
+ case DDID_GetRcwCachedInterfaceTypes:
+ Unpack_GetRcwCachedInterfaceTypes(pSend, pResult);
+ break;
+
+ case DDID_GetRcwCachedInterfacePointers:
+ Unpack_GetRcwCachedInterfacePointers(pSend, pResult);
+ break;
+
+ case DDID_GetCachedWinRTTypesForIIDs:
+ Unpack_GetCachedWinRTTypesForIIDs(pSend, pResult);
+ break;
+
+ case DDID_GetCachedWinRTTypes:
+ Unpack_GetCachedWinRTTypes(pSend, pResult);
+ break;
+
+ case DDID_GetTypedByRefInfo:
+ Unpack_GetTypedByRefInfo(pSend, pResult);
+ break;
+
+ case DDID_GetStringData:
+ Unpack_GetStringData(pSend, pResult);
+ break;
+
+ case DDID_GetArrayData:
+ Unpack_GetArrayData(pSend, pResult);
+ break;
+
+ case DDID_GetBasicObjectInfo:
+ Unpack_GetBasicObjectInfo(pSend, pResult);
+ break;
+
+ case DDID_TestCrst:
+ Unpack_TestCrst(pSend, pResult);
+ break;
+
+ case DDID_TestRWLock:
+ Unpack_TestRWLock(pSend, pResult);
+ break;
+
+ case DDID_GetDebuggerControlBlockAddress:
+ Unpack_GetDebuggerControlBlockAddress(pSend, pResult);
+ break;
+
+ case DDID_GetObjectFromRefPtr:
+ Unpack_GetObjectFromRefPtr(pSend, pResult);
+ break;
+
+ case DDID_GetObject:
+ Unpack_GetObject(pSend, pResult);
+ break;
+
+ case DDID_EnableNGENPolicy:
+ Unpack_EnableNGENPolicy(pSend, pResult);
+ break;
+
+ case DDID_GetVmObjectHandle:
+ Unpack_GetVmObjectHandle(pSend, pResult);
+ break;
+
+ case DDID_IsVmObjectHandleValid:
+ Unpack_IsVmObjectHandleValid(pSend, pResult);
+ break;
+
+ case DDID_IsWinRTModule:
+ Unpack_IsWinRTModule(pSend, pResult);
+ break;
+
+ case DDID_GetAppDomainIdFromVmObjectHandle:
+ Unpack_GetAppDomainIdFromVmObjectHandle(pSend, pResult);
+ break;
+
+ case DDID_GetHandleAddressFromVmHandle:
+ Unpack_GetHandleAddressFromVmHandle(pSend, pResult);
+ break;
+
+ case DDID_GetObjectContents:
+ Unpack_GetObjectContents(pSend, pResult);
+ break;
+
+ case DDID_EnumerateBlockingObjects:
+ Unpack_EnumerateBlockingObjects(pSend, pResult);
+ break;
+
+ case DDID_GetThreadOwningMonitorLock:
+ Unpack_GetThreadOwningMonitorLock(pSend, pResult);
+ break;
+
+ case DDID_EnumerateMonitorEventWaitList:
+ Unpack_EnumerateMonitorEventWaitList(pSend, pResult);
+ break;
+
+ case DDID_GetAttachStateFlags:
+ Unpack_GetAttachStateFlags(pSend, pResult);
+ break;
+
+ case DDID_GetMetaDataFileInfoFromPEFile:
+ Unpack_GetMetaDataFileInfoFromPEFile(pSend, pResult);
+ break;
+
+ case DDID_GetILImageInfoFromNgenPEFile:
+ Unpack_GetILImageInfoFromNgenPEFile(pSend, pResult);
+ break;
+
+ case DDID_IsThreadSuspendedOrHijacked:
+ Unpack_IsThreadSuspendedOrHijacked(pSend, pResult);
+ break;
+
+ case DDID_AreGCStructuresValid:
+ Unpack_AreGCStructuresValid(pSend, pResult);
+ break;
+
+ case DDID_CreateHeapWalk:
+ Unpack_CreateHeapWalk(pSend, pResult);
+ break;
+
+ case DDID_DeleteHeapWalk:
+ Unpack_DeleteHeapWalk(pSend, pResult);
+ break;
+
+ case DDID_WalkHeap:
+ Unpack_WalkHeap(pSend, pResult);
+ break;
+
+ case DDID_GetHeapSegments:
+ Unpack_GetHeapSegments(pSend, pResult);
+ break;
+
+ case DDID_IsValidObject:
+ Unpack_IsValidObject(pSend, pResult);
+ break;
+
+ case DDID_GetAppDomainForObject:
+ Unpack_GetAppDomainForObject(pSend, pResult);
+ break;
+
+ case DDID_CreateRefWalk:
+ Unpack_CreateRefWalk(pSend, pResult);
+ break;
+
+ case DDID_DeleteRefWalk:
+ Unpack_DeleteRefWalk(pSend, pResult);
+ break;
+
+ case DDID_WalkRefs:
+ Unpack_WalkRefs(pSend, pResult);
+ break;
+
+ case DDID_GetTypeID:
+ Unpack_GetTypeID(pSend, pResult);
+ break;
+
+ case DDID_GetObjectFields:
+ Unpack_GetObjectFields(pSend, pResult);
+ break;
+
+ case DDID_GetTypeLayout:
+ Unpack_GetTypeLayout(pSend, pResult);
+ break;
+
+ case DDID_GetArrayLayout:
+ Unpack_GetArrayLayout(pSend, pResult);
+ break;
+
+ case DDID_GetGCHeapInformation:
+ Unpack_GetGCHeapInformation(pSend, pResult);
+ break;
+
+
+ } // end switch
+} // end HandleDDMessage method
+
+#endif //FEATURE_DBGIPC_TRANSPORT_VM
+
+// end of file
diff --git a/src/debug/ee/ddunpack.h b/src/debug/ee/ddunpack.h
new file mode 100644
index 0000000000..23be521642
--- /dev/null
+++ b/src/debug/ee/ddunpack.h
@@ -0,0 +1,498 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+// Unpacker
+//
+// Lives on EE side of the fence
+//
+// Note that this file is generated by ndp\clr\src\Debug\tools\BuildDDMarshal\.
+// Changes should be made to output\DDUnpack_template.h in that directory.
+//
+
+
+#ifndef _DDUNPACK_H_
+#define _DDUNPACK_H_
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+
+#include "ddmarshalutil.h"
+#include "ddshared.h"
+
+
+// This technique is now misnamed, but I am still using it to lower code churn.
+// When handing memory back and forth between DAC and DBI we use this (forDbi) new variant
+// to ensure that everything goes in the correct heap. In DBI it resolves to new, and in DAC
+// it resolves to calling into a special allocator that DBI passed over that ultimately also
+// calls DBI new. A few types such as DacDbiArrayList get included on both sides of the DLL
+// boundary and they assume there will be a (forDbi) new variant available to them.
+//
+// Now, however, we have a new in-proc consumer of DAC and again we need to pass memory blocks
+// to it. The most straightforward technique is to consume DAC exactly how DBI does, thus mimicking
+// DBI's new variant here so that all the types which straddle the boundary can continue using
+// it. At some point we might want to change the naming to something more general... 'forDacCaller'
+// perhaps. I don't consider the technique a workaround; it's just that the naming is overly specific.
+
+#define forDbi (*(forDbiWorker *)NULL)
+
+// For DBI we just default to plain new, but we need to have these defined for both DAC and DBI.
+inline void * operator new(size_t lenBytes, const forDbiWorker &)
+{
+ void * result = new BYTE[lenBytes];
+ if (result == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+ return result;
+}
+
+inline void * operator new[](size_t lenBytes, const forDbiWorker &)
+{
+ void * result = new BYTE[lenBytes];
+ if (result == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+ return result;
+}
+
+// Helper to delete memory used with the IDacDbiInterface::IAllocator interface.
+template<class T> inline
+void DeleteDbiMemory(T *p)
+{
+ delete p;
+}
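+
+// Illustrative sketch (assumption; not part of the generated file): code that hands
+// a result object back across the DAC/DBI boundary allocates it with the placement
+// form above and releases it with DeleteDbiMemory, e.g.:
+//
+//     COR_HEAPINFO * pInfo = new (forDbi) COR_HEAPINFO;  // goes to whichever heap
+//                                                        // the forDbi operator targets
+//     // ... fill in and marshal pInfo ...
+//     DeleteDbiMemory(pInfo);
+//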
+
+
+// Header for unpacking
+//
+class DDUnpack
+ {
+ protected:
+ IDacDbiInterface * m_pReal;
+ IDacDbiInterface::IAllocator * m_pAllocator;
+ public:
+ DDUnpack(IDacDbiInterface * pReal, IDacDbiInterface::IAllocator * pAllocator)
+ {
+ m_pReal = pReal;
+ m_pAllocator = pAllocator;
+ }
+
+ // Main entry point
+ // This will then delegate to the proper unpacking stubs.
+ void HandleDDMessage(ReadBuffer * pSend, WriteBuffer * pResult);
+
+
+ //
+ // Stubs
+ //
+
+//---------------------------------------------------------------------
+// Unpacking stubs
+ // HRESULT CheckDbiVersion(const DbiVersion * pVersion)
+ void Unpack_CheckDbiVersion(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetLocalInterfaceHashAndTimestamp(DWORD & hash1, DWORD & hash2, DWORD & hash3, DWORD & hash4, DWORD & timestamp1, DWORD & timestamp2)
+ void Unpack_GetLocalInterfaceHashAndTimestamp(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetRemoteInterfaceHashAndTimestamp(DWORD & hash1, DWORD & hash2, DWORD & hash3, DWORD & hash4, DWORD & timestamp1, DWORD & timestamp2)
+ void Unpack_GetRemoteInterfaceHashAndTimestamp(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT FlushCache()
+ void Unpack_FlushCache(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void DacSetTargetConsistencyChecks(bool fEnableAsserts)
+ void Unpack_DacSetTargetConsistencyChecks(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void Destroy()
+ void Unpack_Destroy(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsLeftSideInitialized()
+ void Unpack_IsLeftSideInitialized(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_AppDomain GetAppDomainFromId(ULONG appdomainId)
+ void Unpack_GetAppDomainFromId(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // ULONG GetAppDomainId(VMPTR_AppDomain vmAppDomain)
+ void Unpack_GetAppDomainId(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_OBJECTHANDLE GetAppDomainObject(VMPTR_AppDomain vmAppDomain)
+ void Unpack_GetAppDomainObject(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsDefaultDomain(VMPTR_AppDomain vmAppDomain)
+ void Unpack_IsDefaultDomain(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetAssemblyFromDomainAssembly(VMPTR_DomainAssembly vmDomainAssembly, VMPTR_Assembly * vmAssembly)
+ void Unpack_GetAssemblyFromDomainAssembly(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsAssemblyFullyTrusted(VMPTR_DomainAssembly vmDomainAssembly)
+ void Unpack_IsAssemblyFullyTrusted(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetAppDomainFullName(VMPTR_AppDomain vmAppDomain, IStringHolder * pStrName)
+ void Unpack_GetAppDomainFullName(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetModuleSimpleName(VMPTR_Module vmModule, IStringHolder * pStrFilename)
+ void Unpack_GetModuleSimpleName(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL GetAssemblyPath(VMPTR_Assembly vmAssembly, IStringHolder * pStrFilename)
+ void Unpack_GetAssemblyPath(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void ResolveTypeReference(const TypeRefData * pTypeRefInfo, TypeRefData * pTargetRefInfo)
+ void Unpack_ResolveTypeReference(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL GetModulePath(VMPTR_Module vmModule, IStringHolder * pStrFilename)
+ void Unpack_GetModulePath(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL GetModuleNGenPath(VMPTR_Module vmModule, IStringHolder * pStrFilename)
+ void Unpack_GetModuleNGenPath(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetMetadata(VMPTR_Module vmModule, TargetBuffer * pTargetBuffer)
+ void Unpack_GetMetadata(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetSymbolsBuffer(VMPTR_Module vmModule, TargetBuffer * pTargetBuffer, IDacDbiInterface::SymbolFormat * pSymbolFormat)
+ void Unpack_GetSymbolsBuffer(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetModuleData(VMPTR_Module vmModule, ModuleInfo * pData)
+ void Unpack_GetModuleData(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetDomainFileData(VMPTR_DomainFile vmDomainFile, DomainFileInfo * pData)
+ void Unpack_GetDomainFileData(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetModuleForDomainFile(VMPTR_DomainFile vmDomainFile, VMPTR_Module * pModule)
+ void Unpack_GetModuleForDomainFile(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // IDacDbiInterface::AddressType GetAddressType(CORDB_ADDRESS address)
+ void Unpack_GetAddressType(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsTransitionStub(CORDB_ADDRESS address)
+ void Unpack_IsTransitionStub(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetCompilerFlags(VMPTR_DomainFile vmDomainFile, BOOL * pfAllowJITOpts, BOOL * pfEnableEnC)
+ void Unpack_GetCompilerFlags(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT SetCompilerFlags(VMPTR_DomainFile vmDomainFile, BOOL fAllowJitOpts, BOOL fEnableEnC)
+ void Unpack_SetCompilerFlags(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void EnumerateAppDomains(IDacDbiInterface::FP_APPDOMAIN_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+ void Unpack_EnumerateAppDomains(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void EnumerateAssembliesInAppDomain(VMPTR_AppDomain vmAppDomain, IDacDbiInterface::FP_ASSEMBLY_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+ void Unpack_EnumerateAssembliesInAppDomain(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void EnumerateModulesInAssembly(VMPTR_DomainAssembly vmAssembly, IDacDbiInterface::FP_MODULE_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+ void Unpack_EnumerateModulesInAssembly(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void RequestSyncAtEvent()
+ void Unpack_RequestSyncAtEvent(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void MarkDebuggerAttachPending()
+ void Unpack_MarkDebuggerAttachPending(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void MarkDebuggerAttached(BOOL fAttached)
+ void Unpack_MarkDebuggerAttached(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void Hijack(VMPTR_Thread vmThread, ULONG32 dwThreadId, const EXCEPTION_RECORD * pRecord, T_CONTEXT * pOriginalContext, ULONG32 cbSizeContext, EHijackReason::EHijackReason reason, void * pUserData, CORDB_ADDRESS * pRemoteContextAddr)
+ void Unpack_Hijack(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void EnumerateConnections(IDacDbiInterface::FP_CONNECTION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+ void Unpack_EnumerateConnections(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void EnumerateThreads(IDacDbiInterface::FP_THREAD_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+ void Unpack_EnumerateThreads(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // bool IsThreadMarkedDead(VMPTR_Thread vmThread)
+ void Unpack_IsThreadMarkedDead(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HANDLE GetThreadHandle(VMPTR_Thread vmThread)
+ void Unpack_GetThreadHandle(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_OBJECTHANDLE GetThreadObject(VMPTR_Thread vmThread)
+ void Unpack_GetThreadObject(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void SetDebugState(VMPTR_Thread vmThread, CorDebugThreadState debugState)
+ void Unpack_SetDebugState(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL HasUnhandledException(VMPTR_Thread vmThread)
+ void Unpack_HasUnhandledException(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // CorDebugUserState GetUserState(VMPTR_Thread vmThread)
+ void Unpack_GetUserState(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // CONNID GetConnectionID(VMPTR_Thread vmThread)
+ void Unpack_GetConnectionID(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // TASKID GetTaskID(VMPTR_Thread vmThread)
+ void Unpack_GetTaskID(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // DWORD TryGetVolatileOSThreadID(VMPTR_Thread vmThread)
+ void Unpack_TryGetVolatileOSThreadID(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // DWORD GetUniqueThreadID(VMPTR_Thread vmThread)
+ void Unpack_GetUniqueThreadID(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_OBJECTHANDLE GetCurrentException(VMPTR_Thread vmThread)
+ void Unpack_GetCurrentException(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_OBJECTHANDLE GetCurrentCustomDebuggerNotification(VMPTR_Thread vmThread)
+ void Unpack_GetCurrentCustomDebuggerNotification(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_AppDomain GetCurrentAppDomain(VMPTR_Thread vmThread)
+ void Unpack_GetCurrentAppDomain(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_DomainAssembly ResolveAssembly(VMPTR_DomainFile vmScope, mdToken tkAssemblyRef)
+ void Unpack_ResolveAssembly(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetNativeCodeSequencePointsAndVarInfo(VMPTR_MethodDesc vmMethodDesc, CORDB_ADDRESS startAddress, BOOL fCodeAvailabe, NativeVarData * pNativeVarData, SequencePoints * pSequencePoints)
+ void Unpack_GetNativeCodeSequencePointsAndVarInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_CONTEXT GetManagedStoppedContext(VMPTR_Thread vmThread)
+ void Unpack_GetManagedStoppedContext(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void CreateStackWalk(VMPTR_Thread vmThread, DT_CONTEXT * pInternalContextBuffer, StackWalkHandle * ppSFIHandle)
+ void Unpack_CreateStackWalk(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void DeleteStackWalk(StackWalkHandle ppSFIHandle)
+ void Unpack_DeleteStackWalk(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetStackWalkCurrentContext(StackWalkHandle pSFIHandle, DT_CONTEXT * pContext)
+ void Unpack_GetStackWalkCurrentContext(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void SetStackWalkCurrentContext(VMPTR_Thread vmThread, StackWalkHandle pSFIHandle, CorDebugSetContextFlag flag, DT_CONTEXT * pContext)
+ void Unpack_SetStackWalkCurrentContext(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL UnwindStackWalkFrame(StackWalkHandle pSFIHandle)
+ void Unpack_UnwindStackWalkFrame(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT CheckContext(VMPTR_Thread vmThread, const DT_CONTEXT * pContext)
+ void Unpack_CheckContext(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // IDacDbiInterface::FrameType GetStackWalkCurrentFrameInfo(StackWalkHandle pSFIHandle, DebuggerIPCE_STRData * pFrameData)
+ void Unpack_GetStackWalkCurrentFrameInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // ULONG32 GetCountOfInternalFrames(VMPTR_Thread vmThread)
+ void Unpack_GetCountOfInternalFrames(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void EnumerateInternalFrames(VMPTR_Thread vmThread, IDacDbiInterface::FP_INTERNAL_FRAME_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+ void Unpack_EnumerateInternalFrames(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsMatchingParentFrame(FramePointer fpToCheck, FramePointer fpParent)
+ void Unpack_IsMatchingParentFrame(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // ULONG32 GetStackParameterSize(CORDB_ADDRESS controlPC)
+ void Unpack_GetStackParameterSize(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // FramePointer GetFramePointer(StackWalkHandle pSFIHandle)
+ void Unpack_GetFramePointer(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsLeafFrame(VMPTR_Thread vmThread, const DT_CONTEXT * pContext)
+ void Unpack_IsLeafFrame(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetContext(VMPTR_Thread vmThread, DT_CONTEXT * pContextBuffer)
+ void Unpack_GetContext(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void ConvertContextToDebuggerRegDisplay(const DT_CONTEXT * pInContext, DebuggerREGDISPLAY * pOutDRD, BOOL fActive)
+ void Unpack_ConvertContextToDebuggerRegDisplay(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // IDacDbiInterface::DynamicMethodType IsILStubOrLCGMethod(VMPTR_MethodDesc vmMethodDesc)
+ void Unpack_IsILStubOrLCGMethod(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // TargetBuffer GetVarArgSig(CORDB_ADDRESS VASigCookieAddr, CORDB_ADDRESS * pArgBase)
+ void Unpack_GetVarArgSig(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL RequiresAlign8(VMPTR_TypeHandle thExact)
+ void Unpack_RequiresAlign8(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // GENERICS_TYPE_TOKEN ResolveExactGenericArgsToken(DWORD dwExactGenericArgsTokenIndex, GENERICS_TYPE_TOKEN rawToken)
+ void Unpack_ResolveExactGenericArgsToken(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetILCodeAndSig(VMPTR_DomainFile vmDomainFile, mdToken functionToken, TargetBuffer * pCodeInfo, mdToken * pLocalSigToken)
+ void Unpack_GetILCodeAndSig(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetNativeCodeInfo(VMPTR_DomainFile vmDomainFile, mdToken functionToken, NativeCodeFunctionData * pCodeInfo)
+ void Unpack_GetNativeCodeInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetNativeCodeInfoForAddr(VMPTR_MethodDesc vmMethodDesc, CORDB_ADDRESS hotCodeStartAddr, NativeCodeFunctionData * pCodeInfo)
+ void Unpack_GetNativeCodeInfoForAddr(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetClassInfo(VMPTR_AppDomain vmAppDomain, VMPTR_Module vmModule, mdTypeDef metadataToken, VMPTR_TypeHandle thExact, VMPTR_TypeHandle thApprox, ClassInfo * pData)
+ void Unpack_GetClassInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetInstantiationFieldInfo(VMPTR_DomainFile vmDomainFile, mdTypeDef metadataToken, VMPTR_TypeHandle vmThExact, VMPTR_TypeHandle vmThApprox, DacDbiArrayList<FieldData> * pFieldList, SIZE_T * pObjectSize)
+ void Unpack_GetInstantiationFieldInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle vmTypeHandle, DebuggerIPCE_ExpandedTypeData * pTypeInfo)
+ void Unpack_TypeHandleToExpandedTypeInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetObjectExpandedTypeInfo(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, CORDB_ADDRESS addr, DebuggerIPCE_ExpandedTypeData * pTypeInfo)
+ void Unpack_GetObjectExpandedTypeInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetObjectExpandedTypeInfoFromID(AreValueTypesBoxed boxed, VMPTR_AppDomain vmAppDomain, COR_TYPEID id, DebuggerIPCE_ExpandedTypeData * pTypeInfo)
+ void Unpack_GetObjectExpandedTypeInfoFromID(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_TypeHandle GetApproxTypeHandle(TypeInfoList * pTypeData)
+ void Unpack_GetApproxTypeHandle(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT GetExactTypeHandle(DebuggerIPCE_ExpandedTypeData * pTypeData, ArgInfoList * pArgInfo, VMPTR_TypeHandle & vmTypeHandle)
+ void Unpack_GetExactTypeHandle(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetMethodDescParams(VMPTR_AppDomain vmAppDomain, VMPTR_MethodDesc vmMethodDesc, GENERICS_TYPE_TOKEN genericsToken, UINT32 * pcGenericClassTypeParams, TypeParamsList * pGenericTypeParams)
+ void Unpack_GetMethodDescParams(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // CORDB_ADDRESS GetThreadOrContextStaticAddress(VMPTR_FieldDesc vmField, VMPTR_Thread vmRuntimeThread)
+ void Unpack_GetThreadOrContextStaticAddress(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // CORDB_ADDRESS GetCollectibleTypeStaticAddress(VMPTR_FieldDesc vmField, VMPTR_AppDomain vmAppDomain)
+ void Unpack_GetCollectibleTypeStaticAddress(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetEnCHangingFieldInfo(const EnCHangingFieldInfo * pEnCFieldInfo, FieldData * pFieldData, BOOL * pfStatic)
+ void Unpack_GetEnCHangingFieldInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetTypeHandleParams(VMPTR_AppDomain vmAppDomain, VMPTR_TypeHandle vmTypeHandle, TypeParamsList * pParams)
+ void Unpack_GetTypeHandleParams(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetSimpleType(VMPTR_AppDomain vmAppDomain, CorElementType simpleType, mdTypeDef * pMetadataToken, VMPTR_Module * pVmModule, VMPTR_DomainFile * pVmDomainFile)
+ void Unpack_GetSimpleType(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsExceptionObject(VMPTR_Object vmObject)
+ void Unpack_IsExceptionObject(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetStackFramesFromException(VMPTR_Object vmObject, DacDbiArrayList<DacExceptionCallStackData> & dacStackFrames)
+ void Unpack_GetStackFramesFromException(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsRcw(VMPTR_Object vmObject)
+ void Unpack_IsRcw(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetRcwCachedInterfaceTypes(VMPTR_Object vmObject, VMPTR_AppDomain vmAppDomain, BOOL bIInspectableOnly, DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pDacInterfaces)
+ void Unpack_GetRcwCachedInterfaceTypes(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetRcwCachedInterfacePointers(VMPTR_Object vmObject, BOOL bIInspectableOnly, DacDbiArrayList<CORDB_ADDRESS> * pDacItfPtrs)
+ void Unpack_GetRcwCachedInterfacePointers(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetCachedWinRTTypesForIIDs(VMPTR_AppDomain vmAppDomain, DacDbiArrayList<GUID> & iids, DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes)
+ void Unpack_GetCachedWinRTTypesForIIDs(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetCachedWinRTTypes(VMPTR_AppDomain vmAppDomain, DacDbiArrayList<GUID> * piids, DacDbiArrayList<DebuggerIPCE_ExpandedTypeData> * pTypes)
+ void Unpack_GetCachedWinRTTypes(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetTypedByRefInfo(CORDB_ADDRESS pTypedByRef, VMPTR_AppDomain vmAppDomain, DebuggerIPCE_ObjectData * pObjectData)
+ void Unpack_GetTypedByRefInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetStringData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData)
+ void Unpack_GetStringData(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetArrayData(CORDB_ADDRESS objectAddress, DebuggerIPCE_ObjectData * pObjectData)
+ void Unpack_GetArrayData(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetBasicObjectInfo(CORDB_ADDRESS objectAddress, CorElementType type, VMPTR_AppDomain vmAppDomain, DebuggerIPCE_ObjectData * pObjectData)
+ void Unpack_GetBasicObjectInfo(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void TestCrst(VMPTR_Crst vmCrst)
+ void Unpack_TestCrst(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void TestRWLock(VMPTR_SimpleRWLock vmRWLock)
+ void Unpack_TestRWLock(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // CORDB_ADDRESS GetDebuggerControlBlockAddress()
+ void Unpack_GetDebuggerControlBlockAddress(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_Object GetObjectFromRefPtr(CORDB_ADDRESS ptr)
+ void Unpack_GetObjectFromRefPtr(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_Object GetObject(CORDB_ADDRESS ptr)
+ void Unpack_GetObject(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT EnableNGENPolicy(CorDebugNGENPolicy ePolicy)
+ void Unpack_EnableNGENPolicy(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // VMPTR_OBJECTHANDLE GetVmObjectHandle(CORDB_ADDRESS handleAddress)
+ void Unpack_GetVmObjectHandle(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // BOOL IsVmObjectHandleValid(VMPTR_OBJECTHANDLE vmHandle)
+ void Unpack_IsVmObjectHandleValid(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT IsWinRTModule(VMPTR_Module vmModule, BOOL & isWinRT)
+ void Unpack_IsWinRTModule(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // ULONG GetAppDomainIdFromVmObjectHandle(VMPTR_OBJECTHANDLE vmHandle)
+ void Unpack_GetAppDomainIdFromVmObjectHandle(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // CORDB_ADDRESS GetHandleAddressFromVmHandle(VMPTR_OBJECTHANDLE vmHandle)
+ void Unpack_GetHandleAddressFromVmHandle(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // TargetBuffer GetObjectContents(VMPTR_Object obj)
+ void Unpack_GetObjectContents(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void EnumerateBlockingObjects(VMPTR_Thread vmThread, IDacDbiInterface::FP_BLOCKINGOBJECT_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+ void Unpack_EnumerateBlockingObjects(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // MonitorLockInfo GetThreadOwningMonitorLock(VMPTR_Object vmObject)
+ void Unpack_GetThreadOwningMonitorLock(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void EnumerateMonitorEventWaitList(VMPTR_Object vmObject, IDacDbiInterface::FP_THREAD_ENUMERATION_CALLBACK fpCallback, CALLBACK_DATA pUserData)
+ void Unpack_EnumerateMonitorEventWaitList(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // CLR_DEBUGGING_PROCESS_FLAGS GetAttachStateFlags()
+ void Unpack_GetAttachStateFlags(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // bool GetMetaDataFileInfoFromPEFile(VMPTR_PEFile vmPEFile, DWORD & dwTimeStamp, DWORD & dwImageSize, bool & isNGEN, IStringHolder * pStrFilename)
+ void Unpack_GetMetaDataFileInfoFromPEFile(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // bool GetILImageInfoFromNgenPEFile(VMPTR_PEFile vmPEFile, DWORD & dwTimeStamp, DWORD & dwSize, IStringHolder * pStrFilename)
+ void Unpack_GetILImageInfoFromNgenPEFile(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // bool IsThreadSuspendedOrHijacked(VMPTR_Thread vmThread)
+ void Unpack_IsThreadSuspendedOrHijacked(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // bool AreGCStructuresValid()
+ void Unpack_AreGCStructuresValid(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT CreateHeapWalk(HeapWalkHandle * pHandle)
+ void Unpack_CreateHeapWalk(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void DeleteHeapWalk(HeapWalkHandle handle)
+ void Unpack_DeleteHeapWalk(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT WalkHeap(HeapWalkHandle handle, ULONG count, COR_HEAPOBJECT * objects, ULONG * pFetched)
+ void Unpack_WalkHeap(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT GetHeapSegments(DacDbiArrayList<COR_SEGMENT> * pSegments)
+ void Unpack_GetHeapSegments(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // bool IsValidObject(CORDB_ADDRESS obj)
+ void Unpack_IsValidObject(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // bool GetAppDomainForObject(CORDB_ADDRESS obj, VMPTR_AppDomain * pApp, VMPTR_Module * pModule, VMPTR_DomainFile * pDomainFile)
+ void Unpack_GetAppDomainForObject(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT CreateRefWalk(RefWalkHandle * pHandle, BOOL walkStacks, BOOL walkFQ, UINT32 handleWalkMask)
+ void Unpack_CreateRefWalk(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void DeleteRefWalk(RefWalkHandle handle)
+ void Unpack_DeleteRefWalk(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT WalkRefs(RefWalkHandle handle, ULONG count, DacGcReference * refs, ULONG * pFetched)
+ void Unpack_WalkRefs(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT GetTypeID(CORDB_ADDRESS obj, COR_TYPEID * pType)
+ void Unpack_GetTypeID(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT GetObjectFields(COR_TYPEID id, ULONG32 celt, COR_FIELD * layout, ULONG32 * pceltFetched)
+ void Unpack_GetObjectFields(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT GetTypeLayout(COR_TYPEID id, COR_TYPE_LAYOUT * pLayout)
+ void Unpack_GetTypeLayout(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // HRESULT GetArrayLayout(COR_TYPEID id, COR_ARRAY_LAYOUT * pLayout)
+ void Unpack_GetArrayLayout(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ // void GetGCHeapInformation(COR_HEAPINFO * pHeapInfo)
+ void Unpack_GetGCHeapInformation(ReadBuffer * pSend, WriteBuffer * pResult);
+
+ };
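+
+// Illustrative sketch (assumption; variable names are hypothetical): the transport
+// code on the EE side owns one DDUnpack and pumps each incoming request through it:
+//
+//     DDUnpack unpacker(pDacDbiInterface, pDbiAllocator);
+//     unpacker.HandleDDMessage(&readBuffer, &writeBuffer);  // dispatches to an Unpack_* stub
+//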
+
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+#endif // _DDUNPACK_H_
+
+// end of file
diff --git a/src/debug/ee/debugger.cpp b/src/debug/ee/debugger.cpp
new file mode 100644
index 0000000000..5b961a2b7e
--- /dev/null
+++ b/src/debug/ee/debugger.cpp
@@ -0,0 +1,17000 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: debugger.cpp
+//
+
+//
+// Debugger runtime controller routines.
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+#include "debugdebugger.h"
+#include "ipcmanagerinterface.h"
+#include "../inc/common.h"
+#include "perflog.h"
+#include "eeconfig.h" // This is here even for retail & free builds...
+#include "../../dlls/mscorrc/resource.h"
+
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+#include "context.h"
+#include "vars.hpp"
+#include <limits.h>
+#include "ilformatter.h"
+#include "typeparse.h"
+#include "debuginfostore.h"
+#include "generics.h"
+#include "../../vm/security.h"
+#include "../../vm/methoditer.h"
+#include "../../vm/encee.h"
+#include "../../vm/dwreport.h"
+#include "../../vm/eepolicy.h"
+#include "../../vm/excep.h"
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+#include "dbgtransportsession.h"
+#include "dbgtransportproxy.h"
+#include "dbgsecureconnection.h"
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+
+#ifdef TEST_DATA_CONSISTENCY
+#include "datatest.h"
+#endif // TEST_DATA_CONSISTENCY
+
+#if defined(FEATURE_CORECLR)
+#include "dbgenginemetrics.h"
+#endif // FEATURE_CORECLR
+
+#include "../../vm/rejit.h"
+
+#include "threadsuspend.h"
+
+class CCLRSecurityAttributeManager;
+extern CCLRSecurityAttributeManager s_CLRSecurityAttributeManager;
+
+
+#ifdef DEBUGGING_SUPPORTED
+
+#ifdef _DEBUG
+// Reg key. We can set this and then any debugger-lazy-init code will assert.
+// This helps track down places where we're caching in debugger stuff in a
+// non-debugger scenario.
+bool g_DbgShouldntUseDebugger = false;
+#endif
+
+
+/* ------------------------------------------------------------------------ *
+ * Global variables
+ * ------------------------------------------------------------------------ */
+
+GPTR_IMPL(Debugger, g_pDebugger);
+GPTR_IMPL(EEDebugInterface, g_pEEInterface);
+SVAL_IMPL_INIT(BOOL, Debugger, s_fCanChangeNgenFlags, TRUE);
+
+bool g_EnableSIS = false;
+
+
+#ifndef DACCESS_COMPILE
+
+DebuggerRCThread *g_pRCThread = NULL;
+
+#ifndef _PREFAST_
+// Do some compile time checking on the events in DbgIpcEventTypes.h
+// No one ever calls this. But the compiler should still compile it,
+// and that should be sufficient.
+void DoCompileTimeCheckOnDbgIpcEventTypes()
+{
+ _ASSERTE(!"Don't call this function. It just does compile time checking\n");
+
+ // We use the C_ASSERT macro here to get a compile-time assert.
+
+ // Make sure we don't have any duplicate numbers.
+ // The switch statements in the main loops won't always catch this
+ // since we may not switch on all events.
+
+ // store Type-0 in const local vars, so we can use them for bounds checking
+ // Create local vars with the val from Type1 & Type2. If there are any
+ // collisions, then the variables' names will collide at compile time.
+ #define IPC_EVENT_TYPE0(type, val) const int e_##type = val;
+ #define IPC_EVENT_TYPE1(type, val) int T_##val; T_##val = 0;
+ #define IPC_EVENT_TYPE2(type, val) int T_##val; T_##val = 0;
+ #include "dbgipceventtypes.h"
+ #undef IPC_EVENT_TYPE2
+ #undef IPC_EVENT_TYPE1
+ #undef IPC_EVENT_TYPE0
+
+ // Ensure that all identifiers are unique and are matched with
+ // integer values.
+ #define IPC_EVENT_TYPE0(type, val) int T2_##type; T2_##type = val;
+ #define IPC_EVENT_TYPE1(type, val) int T2_##type; T2_##type = val;
+ #define IPC_EVENT_TYPE2(type, val) int T2_##type; T2_##type = val;
+ #include "dbgipceventtypes.h"
+ #undef IPC_EVENT_TYPE2
+ #undef IPC_EVENT_TYPE1
+ #undef IPC_EVENT_TYPE0
+
+ // Make sure all values are subset of the bits specified by DB_IPCE_TYPE_MASK
+ #define IPC_EVENT_TYPE0(type, val)
+ #define IPC_EVENT_TYPE1(type, val) C_ASSERT((val & e_DB_IPCE_TYPE_MASK) == val);
+ #define IPC_EVENT_TYPE2(type, val) C_ASSERT((val & e_DB_IPCE_TYPE_MASK) == val);
+ #include "dbgipceventtypes.h"
+ #undef IPC_EVENT_TYPE2
+ #undef IPC_EVENT_TYPE1
+ #undef IPC_EVENT_TYPE0
+
+ // Make sure that no value is DB_IPCE_INVALID_EVENT
+ #define IPC_EVENT_TYPE0(type, val)
+ #define IPC_EVENT_TYPE1(type, val) C_ASSERT(val != e_DB_IPCE_INVALID_EVENT);
+ #define IPC_EVENT_TYPE2(type, val) C_ASSERT(val != e_DB_IPCE_INVALID_EVENT);
+ #include "dbgipceventtypes.h"
+ #undef IPC_EVENT_TYPE2
+ #undef IPC_EVENT_TYPE1
+ #undef IPC_EVENT_TYPE0
+
+ // Make sure first-last values are well structured.
+ static_assert_no_msg(e_DB_IPCE_RUNTIME_FIRST < e_DB_IPCE_RUNTIME_LAST);
+ static_assert_no_msg(e_DB_IPCE_DEBUGGER_FIRST < e_DB_IPCE_DEBUGGER_LAST);
+
+ // Make sure that event ranges don't overlap.
+ // This check is simplified because L->R events come before R<-L
+ static_assert_no_msg(e_DB_IPCE_RUNTIME_LAST < e_DB_IPCE_DEBUGGER_FIRST);
+
+
+ // Make sure values are in the proper ranges
+ // Type1 should be in the Runtime range, Type2 in the Debugger range.
+ #define IPC_EVENT_TYPE0(type, val)
+ #define IPC_EVENT_TYPE1(type, val) C_ASSERT((e_DB_IPCE_RUNTIME_FIRST <= val) && (val < e_DB_IPCE_RUNTIME_LAST));
+ #define IPC_EVENT_TYPE2(type, val) C_ASSERT((e_DB_IPCE_DEBUGGER_FIRST <= val) && (val < e_DB_IPCE_DEBUGGER_LAST));
+ #include "dbgipceventtypes.h"
+ #undef IPC_EVENT_TYPE2
+ #undef IPC_EVENT_TYPE1
+ #undef IPC_EVENT_TYPE0
+
+ // Make sure that events are in increasing order
+ // It's ok if the events skip numbers.
+ // This is a more specific check than the range check above.
+
+ /* Expands to look like this:
+ const bool f = (
+ first <=
+ 10) && (10 <
+ 11) && (11 <
+ 12) && (12 <
+ last)
+ static_assert_no_msg(f);
+ */
+
+ const bool f1 = (
+ (e_DB_IPCE_RUNTIME_FIRST <=
+ #define IPC_EVENT_TYPE0(type, val)
+ #define IPC_EVENT_TYPE1(type, val) val) && (val <
+ #define IPC_EVENT_TYPE2(type, val)
+ #include "dbgipceventtypes.h"
+ #undef IPC_EVENT_TYPE2
+ #undef IPC_EVENT_TYPE1
+ #undef IPC_EVENT_TYPE0
+ e_DB_IPCE_RUNTIME_LAST)
+ );
+ static_assert_no_msg(f1);
+
+ const bool f2 = (
+ (e_DB_IPCE_DEBUGGER_FIRST <=
+ #define IPC_EVENT_TYPE0(type, val)
+ #define IPC_EVENT_TYPE1(type, val)
+ #define IPC_EVENT_TYPE2(type, val) val) && (val <
+ #include "dbgipceventtypes.h"
+ #undef IPC_EVENT_TYPE2
+ #undef IPC_EVENT_TYPE1
+ #undef IPC_EVENT_TYPE0
+ e_DB_IPCE_DEBUGGER_LAST)
+ );
+ static_assert_no_msg(f2);
+
+} // end checks
+#endif // _PREFAST_
+
+//-----------------------------------------------------------------------------
+// Ctor for AtSafePlaceHolder
+AtSafePlaceHolder::AtSafePlaceHolder(Thread * pThread)
+{
+ _ASSERTE(pThread != NULL);
+ if (!g_pDebugger->IsThreadAtSafePlace(pThread))
+ {
+ m_pThreadAtUnsafePlace = pThread;
+ g_pDebugger->IncThreadsAtUnsafePlaces();
+ }
+ else
+ {
+ m_pThreadAtUnsafePlace = NULL;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Dtor for AtSafePlaceHolder
+AtSafePlaceHolder::~AtSafePlaceHolder()
+{
+ Clear();
+}
+
+//-----------------------------------------------------------------------------
+// Returns true if this adjusted the unsafe counter
+bool AtSafePlaceHolder::IsAtUnsafePlace()
+{
+ return m_pThreadAtUnsafePlace != NULL;
+}
+
+//-----------------------------------------------------------------------------
+// Clear the holder.
+// Notes:
+// This can be called multiple times.
+// Calling this makes the dtor a nop.
+void AtSafePlaceHolder::Clear()
+{
+ if (m_pThreadAtUnsafePlace != NULL)
+ {
+ // The thread is still at an unsafe place.
+ // We're clearing the flag to avoid the Dtor() calling DecThreads again.
+ m_pThreadAtUnsafePlace = NULL;
+ g_pDebugger->DecThreadsAtUnsafePlaces();
+ }
+}
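+
+// Illustrative sketch (assumption): typical use of the holder around code that may run
+// while the thread is not at a GC-safe place:
+//
+//     AtSafePlaceHolder unsafePlaceHolder(pThread);  // bumps the unsafe count if needed
+//     // ... do work ...
+//     // the count is dropped again in the destructor (or earlier via Clear())
+//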
+
+//-----------------------------------------------------------------------------
+// Is the guard page missing on this thread?
+// Should only be called for managed threads handling a managed exception.
+// If we're handling a stack overflow (i.e., a missing guard page), then another
+// stack overflow will instantly terminate the process. In that case, do the
+// stack-intensive work on the helper thread (which has lots of stack space). The only
+// problem is that if the faulting thread holds a lock, the helper thread may
+// get stuck.
+// Serves as a hint as to whether we want to do a favor on the
+// faulting thread (preferred) or the helper thread (if low on stack).
+// See whidbey issue 127436.
+//-----------------------------------------------------------------------------
+bool IsGuardPageGone()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Thread * pThread = g_pEEInterface->GetThread();
+
+    // We're not going to be called for an unmanaged exception.
+    // We should always have a managed thread, but just in case something really
+    // crazy happens, it's not worth an AV (since this is just being used as a hint).
+ if (pThread == NULL)
+ {
+ return false;
+ }
+
+ // Don't use pThread->IsGuardPageGone(), it's not accurate here.
+ bool fGuardPageGone = (pThread->DetermineIfGuardPagePresent() == FALSE);
+ LOG((LF_CORDB, LL_INFO1000000, "D::IsGuardPageGone=%d\n", fGuardPageGone));
+ return fGuardPageGone;
+}
+
+
+// This is called from AppDomainEnumerationIPCBlock::Lock and Unlock
+void BeginThreadAffinityHelper()
+{
+ WRAPPER_NO_CONTRACT;
+
+ Thread::BeginThreadAffinity();
+}
+void EndThreadAffinityHelper()
+{
+ WRAPPER_NO_CONTRACT;
+ Thread::EndThreadAffinity();
+}
+
+//-----------------------------------------------------------------------------
+// LSPTR_XYZ is a type-safe wrapper around an opaque reference type XYZ in the left-side.
+// But TypeHandles are value-types that can't be directly converted into a pointer.
+// Thus converting between LSPTR_XYZ and TypeHandles requires some extra glue.
+// The following conversions are valid:
+// LSPTR_XYZ <--> XYZ* (via Set/UnWrap methods)
+// TypeHandle <--> void* (via AsPtr() and FromPtr()).
+// so we can't directly convert between LSPTR_TYPEHANDLE and TypeHandle.
+// We must do: TypeHandle <--> void* <--> XYZ <--> LSPTR_XYZ
+// So LSPTR_TYPEHANDLE is actually for TypeHandleDummyPtr, and then we unsafe cast
+// that to a void* to use w/ AsPtr() and FromPtr() to convert to TypeHandles.
+// @todo- it would be nice to have these happen automatically w/ Set & UnWrap.
+//-----------------------------------------------------------------------------
+
+// helper class to do conversion above.
+class TypeHandleDummyPtr
+{
+private:
+ TypeHandleDummyPtr() { }; // should never actually create this.
+ void * data;
+};
+
+// Convert: VMPTR_TypeHandle --> TypeHandle
+TypeHandle GetTypeHandle(VMPTR_TypeHandle ptr)
+{
+ return TypeHandle::FromPtr(ptr.GetRawPtr());
+}
+
+// Convert: TypeHandle --> VMPTR_TypeHandle
+VMPTR_TypeHandle WrapTypeHandle(TypeHandle th)
+{
+ return VMPTR_TypeHandle::MakePtr(reinterpret_cast<TypeHandle *> (th.AsPtr()));
+}
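+
+// Illustrative sketch (assumption): the two helpers above are intended to be inverses,
+// so a TypeHandle round-trips through the debugger's VMPTR representation:
+//
+//     VMPTR_TypeHandle vmTH = WrapTypeHandle(th);  // TypeHandle --> VMPTR_TypeHandle
+//     TypeHandle       th2  = GetTypeHandle(vmTH); // VMPTR_TypeHandle --> TypeHandle; th2 == th
+//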
+
+extern void WaitForEndOfShutdown();
+
+
+// Get the Canary structure which can sniff if the helper thread is safe to run.
+HelperCanary * Debugger::GetCanary()
+{
+ return g_pRCThread->GetCanary();
+}
+
+// IMPORTANT!!!!!
+// Do not call Lock and Unlock directly, because you might not unlock
+// if an exception takes place. Use DebuggerLockHolder instead!!!
+// Only AcquireDebuggerLock can call this directly.
+//
+void Debugger::DoNotCallDirectlyPrivateLock(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+    LOG((LF_CORDB,LL_INFO10000, "D::Lock acquire attempt by 0x%x\n",
+ GetCurrentThreadId()));
+
+ // Debugger lock is larger than both Controller & debugger-data locks.
+ // So we should never try to take the D lock if we hold either of the others.
+
+
+ // Lock becomes no-op in late shutdown.
+ if (g_fProcessDetach)
+ {
+ return;
+ }
+
+
+ //
+ // If the debugger has been disabled by the runtime, this means that it should block
+ // all threads that are trying to travel thru the debugger. We do this by blocking
+    // threads as they try to take the debugger lock.
+ //
+ if (m_fDisabled)
+ {
+ __SwitchToThread(INFINITE, CALLER_LIMITS_SPINNING);
+ _ASSERTE (!"Can not reach here");
+ }
+
+ m_mutex.Enter();
+
+ //
+ // If we were blocked on the lock and the debugging facilities got disabled
+ // while we were waiting, release the lock and park this thread.
+ //
+ if (m_fDisabled)
+ {
+ m_mutex.Leave();
+ __SwitchToThread(INFINITE, CALLER_LIMITS_SPINNING);
+ _ASSERTE (!"Can not reach here");
+ }
+
+ //
+ // Now check if we are in a shutdown case...
+ //
+ Thread * pThread;
+ bool fIsCooperative;
+ BEGIN_GETTHREAD_ALLOWED;
+ pThread = g_pEEInterface->GetThread();
+ fIsCooperative = (pThread != NULL) && (pThread->PreemptiveGCDisabled());
+ END_GETTHREAD_ALLOWED;
+ if (m_fShutdownMode && !fIsCooperative)
+ {
+ // The big fear is that some other random thread will take the debugger-lock and then block on something else,
+ // and thus prevent the helper/finalizer threads from taking the debugger-lock in shutdown scenarios.
+ //
+ // If we're in shutdown mode, then some locks (like the Thread-Store-Lock) get special semantics.
+ // Only helper / finalizer / shutdown threads can actually take these locks.
+ // Other threads that try to take them will just get parked and block forever.
+ // This is ok b/c the only threads that need to run at this point are the Finalizer and Helper threads.
+ //
+ // We need to be in preemptive to block for shutdown, so we don't do this block in Coop mode.
+ // Fortunately, it's safe to take this lock in coop mode because we know the thread can't block
+ // on anything interesting because we're in a GC-forbid region (see crst flags).
+ m_mutex.ReleaseAndBlockForShutdownIfNotSpecialThread();
+ }
+
+
+
+#ifdef _DEBUG
+ _ASSERTE(m_mutexCount >= 0);
+
+ if (m_mutexCount>0)
+ {
+ if (pThread)
+ {
+            // managed thread
+ _ASSERTE(m_mutexOwner == GetThreadIdHelper(pThread));
+ }
+ else
+ {
+ // unmanaged thread
+ _ASSERTE(m_mutexOwner == GetCurrentThreadId());
+ }
+ }
+
+ m_mutexCount++;
+ if (pThread)
+ {
+ m_mutexOwner = GetThreadIdHelper(pThread);
+ }
+ else
+ {
+ // unmanaged thread
+ m_mutexOwner = GetCurrentThreadId();
+ }
+
+ if (m_mutexCount == 1)
+ {
+        LOG((LF_CORDB,LL_INFO10000, "D::Lock acquired by 0x%x\n", m_mutexOwner));
+ }
+#endif
+
+}
+
+// See comment above.
+// Only ReleaseDebuggerLock can call directly.
+void Debugger::DoNotCallDirectlyPrivateUnlock(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Controller lock is "smaller" than debugger lock.
+
+
+ if (!g_fProcessDetach)
+ {
+#ifdef _DEBUG
+ if (m_mutexCount == 1)
+ LOG((LF_CORDB,LL_INFO10000, "D::Unlock released by 0x%x\n",
+ m_mutexOwner));
+
+ if(0 == --m_mutexCount)
+ m_mutexOwner = 0;
+
+ _ASSERTE( m_mutexCount >= 0);
+#endif
+ m_mutex.Leave();
+
+ //
+ // If the debugger has been disabled by the runtime, this means that it should block
+ // all threads that are trying to travel thru the debugger. We do this by blocking
+ // threads also as they leave the debugger lock.
+ //
+ if (m_fDisabled)
+ {
+ __SwitchToThread(INFINITE, CALLER_LIMITS_SPINNING);
+ _ASSERTE (!"Can not reach here");
+ }
+
+ }
+}
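+
+// Illustrative sketch (assumption about the holder's exact constructor): callers take
+// the debugger lock through the RAII holder rather than calling the two methods above:
+//
+//     {
+//         DebuggerLockHolder dbgLockHolder(this);  // acquires the debugger lock
+//         // ... work that requires the lock ...
+//     }  // lock is released here, even if an exception is thrown
+//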
+
+#ifdef TEST_DATA_CONSISTENCY
+
+// ---------------------------------------------------------------------------------
+// Implementations for DataTest member functions
+// ---------------------------------------------------------------------------------
+
+// Send an event to the RS to signal that it should test to determine if a crst is held.
+// This is for testing purposes only.
+// Arguments:
+// input: pCrst - the lock to test
+// fOkToTake - true iff the LS does NOT currently hold the lock
+// output: none
+// Notes: The RS will throw if the lock is held. The code that tests the lock will catch the
+// exception and assert if throwing was not the correct thing to do (determined via the
+// boolean). See the case for DB_IPCE_TEST_CRST in code:CordbProcess::RawDispatchEvent.
+//
+void DataTest::SendDbgCrstEvent(Crst * pCrst, bool fOkToTake)
+{
+ DebuggerIPCEvent * pLockEvent = g_pDebugger->m_pRCThread->GetIPCEventSendBuffer();
+
+ g_pDebugger->InitIPCEvent(pLockEvent, DB_IPCE_TEST_CRST);
+
+ pLockEvent->TestCrstData.vmCrst.SetRawPtr(pCrst);
+ pLockEvent->TestCrstData.fOkToTake = fOkToTake;
+
+ g_pDebugger->SendRawEvent(pLockEvent);
+
+} // DataTest::SendDbgCrstEvent
+
+// Send an event to the RS to signal that it should test to determine if a SimpleRWLock is held.
+// This is for testing purposes only.
+// Arguments:
+// input: pRWLock - the lock to test
+// fOkToTake - true iff the LS does NOT currently hold the lock
+// output: none
+// Note: The RS will throw if the lock is held. The code that tests the lock will catch the
+// exception and assert if throwing was not the correct thing to do (determined via the
+// boolean). See the case for DB_IPCE_TEST_RWLOCK in code:CordbProcess::RawDispatchEvent.
+//
+void DataTest::SendDbgRWLockEvent(SimpleRWLock * pRWLock, bool okToTake)
+{
+ DebuggerIPCEvent * pLockEvent = g_pDebugger->m_pRCThread->GetIPCEventSendBuffer();
+
+ g_pDebugger->InitIPCEvent(pLockEvent, DB_IPCE_TEST_RWLOCK);
+
+ pLockEvent->TestRWLockData.vmRWLock.SetRawPtr(pRWLock);
+ pLockEvent->TestRWLockData.fOkToTake = okToTake;
+
+ g_pDebugger->SendRawEvent(pLockEvent);
+} // DataTest::SendDbgRWLockEvent
+
+// Takes a series of locks in various ways and signals the RS to test the locks at interesting
+// points to ensure we reliably detect when the LS holds a lock. If in the course of inspection, the
+// DAC needs to execute a code path where the LS holds a lock, we assume that the locked data is in
+// an inconsistent state. In this situation, we don't want to report information about this data, so
+// we throw an exception.
+// This is for testing purposes only.
+//
+// Arguments: none
+// Return Value: none
+// Notes: See code:CordbProcess::RawDispatchEvent for the RS part of this test and code:Debugger::Startup
+// for the LS invocation of the test.
+// The environment variable TestDataConsistency must be set to 1 to make this test run.
+void DataTest::TestDataSafety()
+{
+ const bool okToTake = true;
+
+ SendDbgCrstEvent(&m_crst1, okToTake);
+ {
+ CrstHolder ch1(&m_crst1);
+ SendDbgCrstEvent(&m_crst1, !okToTake);
+ {
+ CrstHolder ch2(&m_crst2);
+ SendDbgCrstEvent(&m_crst2, !okToTake);
+ SendDbgCrstEvent(&m_crst1, !okToTake);
+ }
+ SendDbgCrstEvent(&m_crst2, okToTake);
+ SendDbgCrstEvent(&m_crst1, !okToTake);
+ }
+ SendDbgCrstEvent(&m_crst1, okToTake);
+
+ {
+ SendDbgRWLockEvent(&m_rwLock, okToTake);
+ SimpleReadLockHolder readLock(&m_rwLock);
+ SendDbgRWLockEvent(&m_rwLock, okToTake);
+ }
+ SendDbgRWLockEvent(&m_rwLock, okToTake);
+ {
+        SimpleWriteLockHolder writeLock(&m_rwLock);
+ SendDbgRWLockEvent(&m_rwLock, !okToTake);
+ }
+
+} // DataTest::TestDataSafety
+
+#endif // TEST_DATA_CONSISTENCY
+
+#if _DEBUG
+static DebugEventCounter g_debugEventCounter;
+static int g_iDbgRuntimeCounter[DBG_RUNTIME_MAX];
+static int g_iDbgDebuggerCounter[DBG_DEBUGGER_MAX];
+
+void DoAssertOnType(DebuggerIPCEventType event, int count)
+{
+ WRAPPER_NO_CONTRACT;
+
+    // check to see if we need to fire the assertion or not.
+ if ((event & 0x0300) == 0x0100)
+ {
+ // use the Runtime array
+ if (g_iDbgRuntimeCounter[event & 0x00ff] == count)
+ {
+ char tmpStr[256];
+ sprintf(tmpStr, "%s == %d, break now!",
+ IPCENames::GetName(event), count);
+
+ // fire the assertion
+ DbgAssertDialog(__FILE__, __LINE__, tmpStr);
+ }
+ }
+    // check to see if we need to fire the assertion or not.
+ else if ((event & 0x0300) == 0x0200)
+ {
+        // use the Debugger array
+ if (g_iDbgDebuggerCounter[event & 0x00ff] == count)
+ {
+ char tmpStr[256];
+ sprintf(tmpStr, "%s == %d, break now!",
+ IPCENames::GetName(event), count);
+
+ // fire the assertion
+ DbgAssertDialog(__FILE__, __LINE__, tmpStr);
+ }
+ }
+
+}
+void DbgLogHelper(DebuggerIPCEventType event)
+{
+ WRAPPER_NO_CONTRACT;
+
+ switch (event)
+ {
+// we don't need to handle event type 0
+#define IPC_EVENT_TYPE0(type, val)
+#define IPC_EVENT_TYPE1(type, val) case type: {\
+ g_debugEventCounter.m_iDebugCount_##type++; \
+ DoAssertOnType(type, g_debugEventCounter.m_iDebugCount_##type); \
+ break; \
+ }
+#define IPC_EVENT_TYPE2(type, val) case type: { \
+ g_debugEventCounter.m_iDebugCount_##type++; \
+ DoAssertOnType(type, g_debugEventCounter.m_iDebugCount_##type); \
+ break; \
+ }
+#include "dbgipceventtypes.h"
+#undef IPC_EVENT_TYPE2
+#undef IPC_EVENT_TYPE1
+#undef IPC_EVENT_TYPE0
+ default:
+ break;
+ }
+}
+#endif // _DEBUG
+
+
+
+
+
+
+
+
+
+/* ------------------------------------------------------------------------ *
+ * DLL export routine
+ * ------------------------------------------------------------------------ */
+
+Debugger *CreateDebugger(void)
+{
+ Debugger *pDebugger = NULL;
+
+ EX_TRY
+ {
+ pDebugger = new (nothrow) Debugger();
+ }
+ EX_CATCH
+ {
+ if (pDebugger != NULL)
+ {
+ delete pDebugger;
+ pDebugger = NULL;
+ }
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ return pDebugger;
+}
+
+//
+// CorDBGetInterface is exported to the Runtime so that it can call
+// the Runtime Controller.
+//
+extern "C"{
+HRESULT __cdecl CorDBGetInterface(DebugInterface** rcInterface)
+{
+ CONTRACT(HRESULT)
+ {
+ NOTHROW; // use HRESULTS instead
+ GC_NOTRIGGER;
+ POSTCONDITION(FAILED(RETVAL) || (rcInterface == NULL) || (*rcInterface != NULL));
+ }
+ CONTRACT_END;
+
+ HRESULT hr = S_OK;
+
+ if (rcInterface != NULL)
+ {
+ if (g_pDebugger == NULL)
+ {
+ LOG((LF_CORDB, LL_INFO10,
+ "CorDBGetInterface: initializing debugger.\n"));
+
+ g_pDebugger = CreateDebugger();
+ TRACE_ALLOC(g_pDebugger);
+
+ if (g_pDebugger == NULL)
+ hr = E_OUTOFMEMORY;
+ }
+
+ *rcInterface = g_pDebugger;
+ }
+
+ RETURN hr;
+}
+}
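+
+// Illustrative sketch (assumption): the runtime obtains its controller interface
+// through the export above:
+//
+//     DebugInterface * pDbgInterface = NULL;
+//     HRESULT hr = CorDBGetInterface(&pDbgInterface);
+//     // on success, pDbgInterface points at the process-wide Debugger instance
+//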
+
+//-----------------------------------------------------------------------------
+// Send a pre-init IPC event and block.
+// We assume the IPC event has already been initialized. There's nothing special
+// here; it just uses the standard formula for sending an IPC event to the RS.
+// This should match up w/ the description in SENDIPCEVENT_BEGIN.
+//-----------------------------------------------------------------------------
+void Debugger::SendSimpleIPCEventAndBlock()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ // BEGIN will acquire the lock (END will release it). While blocking, the
+ // debugger may have detached though, so we need to check for that.
+ _ASSERTE(ThreadHoldsLock());
+
+ if (CORDebuggerAttached())
+ {
+ m_pRCThread->SendIPCEvent();
+
+ // Stop all Runtime threads
+ this->TrapAllRuntimeThreads();
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Get context from a thread in managed code.
+// See header for exact semantics.
+//-----------------------------------------------------------------------------
+CONTEXT * GetManagedStoppedCtx(Thread * pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pThread != NULL);
+
+ // We may be stopped or live.
+
+ // If we're stopped at an interop-hijack, we'll have a filter context,
+ // but we'd better not be redirected for a managed-suspension hijack.
+ if (pThread->GetInteropDebuggingHijacked())
+ {
+ _ASSERTE(!ISREDIRECTEDTHREAD(pThread));
+ return NULL;
+ }
+
+ // Check if we have a filter ctx. This should only be for managed-code.
+ // We're stopped at some exception (likely an int3 or single-step).
+ // Can't have both filter ctx + redirected ctx.
+ CONTEXT *pCtx = g_pEEInterface->GetThreadFilterContext(pThread);
+ if (pCtx != NULL)
+ {
+ _ASSERTE(!ISREDIRECTEDTHREAD(pThread));
+ return pCtx;
+ }
+
+ if (ISREDIRECTEDTHREAD(pThread))
+ {
+ pCtx = GETREDIRECTEDCONTEXT(pThread);
+ _ASSERTE(pCtx != NULL);
+ return pCtx;
+ }
+
+ // Not stopped somewhere in managed code.
+ return NULL;
+}
+
+//-----------------------------------------------------------------------------
+// See header for exact semantics.
+// Never NULL. (Caller guarantees this is active.)
+//-----------------------------------------------------------------------------
+CONTEXT * GetManagedLiveCtx(Thread * pThread)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(pThread != NULL);
+
+ // We should never be on the helper thread, we should only be inspecting our own thread.
+ // We're in some Controller's Filter after hitting an exception.
+ // We're not stopped.
+ //_ASSERTE(!g_pDebugger->IsStopped()); <-- @todo - this fires, need to find out why.
+ _ASSERTE(GetThread() == pThread);
+
+ CONTEXT *pCtx = g_pEEInterface->GetThreadFilterContext(pThread);
+
+ // Note that we may be in a M2U hijack. So we can't assert !pThread->GetInteropDebuggingHijacked()
+ _ASSERTE(!ISREDIRECTEDTHREAD(pThread));
+ _ASSERTE(pCtx);
+
+ return pCtx;
+}
+
+// Attempt to validate a GC handle.
+HRESULT ValidateGCHandle(OBJECTHANDLE oh)
+{
+ // The only real way to do this is to Enumerate all GC handles in the handle table.
+ // That's too expensive. So we'll use a similar workaround that we use in ValidateObject.
+ // This will err on the side of returning True for invalid handles.
+
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ // Use AVInRuntimeImplOkayHolder.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ // This may throw if the Object Handle is invalid.
+ Object * objPtr = *((Object**) oh);
+
+ // NULL is certainly valid...
+ if (objPtr != NULL)
+ {
+ if (!objPtr->ValidateObjectWithPossibleAV())
+ {
+ LOG((LF_CORDB, LL_INFO10000, "GAV: object methodtable-class invariant doesn't hold.\n"));
+ hr = E_INVALIDARG;
+ goto LExit;
+ }
+ }
+
+ LExit: ;
+ }
+ EX_CATCH
+ {
+ LOG((LF_CORDB, LL_INFO10000, "GAV: exception indicated ref is bad.\n"));
+ hr = E_INVALIDARG;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
+
+
+// Validate an object. Returns E_INVALIDARG or S_OK.
+HRESULT ValidateObject(Object *objPtr)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ // Use AVInRuntimeImplOkHolder.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ // NULL is certainly valid...
+ if (objPtr != NULL)
+ {
+ if (!objPtr->ValidateObjectWithPossibleAV())
+ {
+ LOG((LF_CORDB, LL_INFO10000, "GAV: object methodtable-class invariant doesn't hold.\n"));
+ hr = E_INVALIDARG;
+ goto LExit;
+ }
+ }
+
+ LExit: ;
+ }
+ EX_CATCH
+ {
+ LOG((LF_CORDB, LL_INFO10000, "GAV: exception indicated ref is bad.\n"));
+ hr = E_INVALIDARG;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+} // ValidateObject
+
+
+/* ------------------------------------------------------------------------ *
+ * Debugger routines
+ * ------------------------------------------------------------------------ */
+
+//
+// a Debugger object represents the global state of the debugger program.
+//
+
+//
+// Constructor & Destructor
+//
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+Debugger::Debugger()
+ :
+ m_fLeftSideInitialized(FALSE),
+#ifdef _DEBUG
+ m_mutexCount(0),
+#endif //_DEBUG
+ m_pRCThread(NULL),
+ m_trappingRuntimeThreads(FALSE),
+ m_stopped(FALSE),
+ m_unrecoverableError(FALSE),
+ m_ignoreThreadDetach(FALSE),
+ m_pMethodInfos(NULL),
+ m_mutex(CrstDebuggerMutex, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_REENTRANCY | CRST_DEBUGGER_THREAD)),
+#ifdef _DEBUG
+ m_mutexOwner(0),
+ m_tidLockedForEventSending(0),
+#endif //_DEBUG
+ m_threadsAtUnsafePlaces(0),
+ m_jitAttachInProgress(FALSE),
+ m_attachingForManagedEvent(FALSE),
+ m_launchingDebugger(FALSE),
+ m_userRequestedDebuggerLaunch(FALSE),
+ m_LoggingEnabled(TRUE),
+ m_pAppDomainCB(NULL),
+ m_dClassLoadCallbackCount(0),
+ m_pModules(NULL),
+ m_RSRequestedSync(FALSE),
+ m_sendExceptionsOutsideOfJMC(TRUE),
+ m_pIDbgThreadControl(NULL),
+ m_forceNonInterceptable(FALSE),
+ m_pLazyData(NULL),
+ m_defines(_defines)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ CONSTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ m_fShutdownMode = false;
+ m_fDisabled = false;
+
+#ifdef _DEBUG
+ InitDebugEventCounting();
+#endif
+
+ m_processId = GetCurrentProcessId();
+
+ // Initialize these in ctor because we free them in dtor.
+ // And we can't set them to some safe uninited value (like NULL).
+
+
+
+ //------------------------------------------------------------------------------
+ // Metadata data structure version numbers
+ //
+ // 1 - initial state of the layouts ( .Net 4.5.2 )
+ //
+ // as data structure layouts change, add a new version number
+ // and comment the changes
+ m_mdDataStructureVersion = 1;
+
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+Debugger::~Debugger()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ DESTRUCTOR_CHECK;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // We explicitly leak the debugger object on shutdown. See Debugger::StopDebugger for details.
+ _ASSERTE(!"Debugger dtor should not be called.");
+}
+
+#ifdef FEATURE_HIJACK
+typedef void (*PFN_HIJACK_FUNCTION) (void);
+
+// Given the start address and the end address of a function, return a MemoryRange for the function.
+inline MemoryRange GetMemoryRangeForFunction(PFN_HIJACK_FUNCTION pfnStart, PFN_HIJACK_FUNCTION pfnEnd)
+{
+ PCODE pfnStartAddress = (PCODE)GetEEFuncEntryPoint(pfnStart);
+ PCODE pfnEndAddress = (PCODE)GetEEFuncEntryPoint(pfnEnd);
+ return MemoryRange(dac_cast<PTR_VOID>(pfnStartAddress), (pfnEndAddress - pfnStartAddress));
+}
+
+// static
+MemoryRange Debugger::s_hijackFunction[kMaxHijackFunctions] =
+ {GetMemoryRangeForFunction(ExceptionHijack, ExceptionHijackEnd),
+ GetMemoryRangeForFunction(RedirectedHandledJITCaseForGCThreadControl_Stub,
+ RedirectedHandledJITCaseForGCThreadControl_StubEnd),
+ GetMemoryRangeForFunction(RedirectedHandledJITCaseForDbgThreadControl_Stub,
+ RedirectedHandledJITCaseForDbgThreadControl_StubEnd),
+ GetMemoryRangeForFunction(RedirectedHandledJITCaseForUserSuspend_Stub,
+ RedirectedHandledJITCaseForUserSuspend_StubEnd),
+ GetMemoryRangeForFunction(RedirectedHandledJITCaseForYieldTask_Stub,
+ RedirectedHandledJITCaseForYieldTask_StubEnd)};
+#endif // FEATURE_HIJACK
+
+// Save the necessary information for the debugger to recognize an IP in one of the thread redirection
+// functions.
+void Debugger::InitializeHijackFunctionAddress()
+{
+#ifdef FEATURE_HIJACK
+ // Advertise hijack address for the DD Hijack primitive
+ m_rgHijackFunction = Debugger::s_hijackFunction;
+#endif // FEATURE_HIJACK
+}
+
+// For debug-only builds, we'll have a debugging feature to count
+// the number of ipc events and break on a specific number.
+// Initialize the stuff to do that.
+void Debugger::InitDebugEventCounting()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+#ifdef _DEBUG
+ // initialize the debug event counter structure to zero
+ memset(&g_debugEventCounter, 0, sizeof(DebugEventCounter));
+ memset(&g_iDbgRuntimeCounter, 0, DBG_RUNTIME_MAX*sizeof(int));
+ memset(&g_iDbgDebuggerCounter, 0, DBG_DEBUGGER_MAX*sizeof(int));
+
+ // retrieve the possible counter for break point
+ LPWSTR wstrValue = NULL;
+ // The string value is of the following format
+ // <Event Name>=Count;<Event Name>=Count;....;
+ // The string must end with ;
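+ // For example (illustrative value only), a setting such as "DB_IPCE_LOAD_MODULE=2;",
+ // using whatever event name IPCENames::GetEventType recognizes, would fire the assert
+ // on the second occurrence of that event.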
+ if ((wstrValue = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DebuggerBreakPoint)) != NULL)
+ {
+ LPSTR strValue;
+ int cbReq;
+ cbReq = WszWideCharToMultiByte(CP_UTF8, 0, wstrValue,-1, 0,0, 0,0);
+
+ strValue = new (nothrow) char[cbReq+1];
+ // This is a debug-only feature; if the allocation fails, it's not worth taking
+ // down the process.
+ if (strValue == NULL)
+ return;
+
+
+ // now translate the Unicode string to UTF-8
+ WszWideCharToMultiByte(CP_UTF8, 0, wstrValue, -1, strValue, cbReq+1, 0,0);
+ char *szEnd = (char *)strchr(strValue, ';');
+ char *szStart = strValue;
+ while (szEnd != NULL)
+ {
+ // Found a key value
+ char *szNameEnd = strchr(szStart, '=');
+ int iCount;
+ DebuggerIPCEventType eventType;
+ if (szNameEnd != NULL)
+ {
+ // This is a well-formed key
+ *szNameEnd = '\0';
+ *szEnd = '\0';
+
+ // now szStart is the null-terminated key name. Translate the counter into an integer.
+ iCount = atoi(szNameEnd+1);
+ if (iCount != 0)
+ {
+ eventType = IPCENames::GetEventType(szStart);
+
+ if (eventType < DB_IPCE_DEBUGGER_FIRST)
+ {
+ // use the runtime one
+ g_iDbgRuntimeCounter[eventType & 0x00ff] = iCount;
+ }
+ else if (eventType < DB_IPCE_DEBUGGER_LAST)
+ {
+ // use the debugger one
+ g_iDbgDebuggerCounter[eventType & 0x00ff] = iCount;
+ }
+ else
+ _ASSERTE(!"Unknown Event Type");
+ }
+ }
+ szStart = szEnd + 1;
+ // try to find next key value
+ szEnd = (char *)strchr(szStart, ';');
+ }
+
+ // free the UTF-8 buffer
+ delete [] strValue;
+ REGUTIL::FreeConfigString(wstrValue);
+ }
+#endif // _DEBUG
+}
+
+
+// This is a notification from the EE it's about to go to fiber mode.
+// This is given *before* it actually goes to fiber mode.
+HRESULT Debugger::SetFiberMode(bool isFiberMode)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ // Notifications from EE never come on helper worker.
+ PRECONDITION(!ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+
+ Thread * pThread = ::GetThread();
+
+ m_pRCThread->m_pDCB->m_bHostingInFiber = isFiberMode;
+
+ // If there is a debugger already attached, then we have a big problem. As of V2.0, the debugger
+ // does not support debugging processes with fibers in them. We set the unrecoverable state to
+ // indicate that we're in a bad state now. The debugger will notice this, and take appropriate action.
+ if (isFiberMode && CORDebuggerAttached())
+ {
+ LOG((LF_CORDB, LL_INFO10, "Thread has entered fiber mode while debugger attached.\n"));
+
+ EX_TRY
+ {
+ // We send up a MDA for two reasons: 1) we want to give the user some chance to see what went wrong,
+ // and 2) we want to get the Right Side to notice that we're in an unrecoverable error state now.
+
+ SString szName(W("DebuggerFiberModeNotSupported"));
+ SString szDescription;
+ szDescription.LoadResource(CCompRC::Debugging, MDARC_DEBUGGER_FIBER_MODE_NOT_SUPPORTED);
+ SString szXML(W(""));
+
+ // Sending any debug event will be a GC violation.
+ // However, if we're enabling fiber-mode while a debugger is attached, we're already doomed.
+ // Deadlocks and AVs are just around the corner. A Gc-violation is the least of our worries.
+ // We want to at least notify the debugger at all costs.
+ CONTRACT_VIOLATION(GCViolation);
+
+ // As soon as we set unrecoverable error in the LS, the RS will pick it up and basically shut down.
+ // It won't dispatch any events. So we fire the MDA first, and then set unrecoverable error.
+ SendMDANotification(pThread, &szName, &szDescription, &szXML, (CorDebugMDAFlags) 0, FALSE);
+
+ CORDBDebuggerSetUnrecoverableError(this, CORDBG_E_CANNOT_DEBUG_FIBER_PROCESS, false);
+
+ // Fire the MDA again just to force the RS to sniff the LS and pick up that we're in an unrecoverable error.
+ // No harm done from dispatching an MDA twice.
+ SendMDANotification(pThread, &szName, &szDescription, &szXML, (CorDebugMDAFlags) 0, FALSE);
+
+ }
+ EX_CATCH
+ {
+ LOG((LF_CORDB, LL_INFO10, "Error sending MDA regarding fiber mode.\n"));
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+
+ return S_OK;
+}
+
+// Checks if the MethodInfos table has been allocated, and if not does so.
+// Returns S_OK on success, or E_OUTOFMEMORY if the table could not be allocated.
+HRESULT Debugger::CheckInitMethodInfoTable()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_pMethodInfos == NULL)
+ {
+ DebuggerMethodInfoTable *pMethodInfos = NULL;
+
+ EX_TRY
+ {
+ pMethodInfos = new (interopsafe) DebuggerMethodInfoTable();
+ }
+ EX_CATCH
+ {
+ pMethodInfos = NULL;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+
+ if (pMethodInfos == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
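+ // Publish the table only if no other thread beat us to it: if the compare-exchange finds a
+ // non-NULL value, another thread already installed a table, so discard the one we built.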
+ if (InterlockedCompareExchangeT(&m_pMethodInfos, pMethodInfos, NULL) != NULL)
+ {
+ DeleteInteropSafe(pMethodInfos);
+ }
+ }
+
+ return S_OK;
+}
+
+// Checks if the m_pModules table has been allocated, and if not does so.
+HRESULT Debugger::CheckInitModuleTable()
+{
+ CONTRACT(HRESULT)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ POSTCONDITION(m_pModules != NULL);
+ }
+ CONTRACT_END;
+
+ if (m_pModules == NULL)
+ {
+ DebuggerModuleTable *pModules = new (interopsafe, nothrow) DebuggerModuleTable();
+
+ if (pModules == NULL)
+ {
+ RETURN (E_OUTOFMEMORY);
+ }
+
+ if (InterlockedCompareExchangeT(&m_pModules, pModules, NULL) != NULL)
+ {
+ DeleteInteropSafe(pModules);
+ }
+ }
+
+ RETURN (S_OK);
+}
+
+// Checks if the pending func-eval table has been allocated, and if not does so.
+HRESULT Debugger::CheckInitPendingFuncEvalTable()
+{
+ CONTRACT(HRESULT)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ POSTCONDITION(GetPendingEvals() != NULL);
+ }
+ CONTRACT_END;
+
+#ifndef DACCESS_COMPILE
+
+ if (GetPendingEvals() == NULL)
+ {
+ DebuggerPendingFuncEvalTable *pPendingEvals = new (interopsafe, nothrow) DebuggerPendingFuncEvalTable();
+
+ if (pPendingEvals == NULL)
+ {
+ RETURN(E_OUTOFMEMORY);
+ }
+
+ // Since we're setting, we need an LValue and not just an accessor.
+ if (InterlockedCompareExchangeT(&(GetLazyData()->m_pPendingEvals), pPendingEvals, NULL) != NULL)
+ {
+ DeleteInteropSafe(pPendingEvals);
+ }
+ }
+#endif
+
+ RETURN (S_OK);
+}
+
+
+#ifdef _DEBUG_DMI_TABLE
+// Returns the number of (official) entries in the table
+ULONG DebuggerMethodInfoTable::CheckDmiTable(void)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ULONG cApparant = 0;
+ ULONG cOfficial = 0;
+
+ if (NULL != m_pcEntries)
+ {
+ DebuggerMethodInfoEntry *dcp;
+ int i = 0;
+ while (i++ < m_iEntries)
+ {
+ dcp = (DebuggerMethodInfoEntry*)&(((DebuggerMethodInfoEntry *)m_pcEntries)[i]);
+ if(dcp->pFD != 0 &&
+ dcp->pFD != (MethodDesc*)0xcdcdcdcd &&
+ dcp->mi != NULL)
+ {
+ cApparant++;
+
+ _ASSERTE( dcp->pFD == dcp->mi->m_fd );
+ LOG((LF_CORDB, LL_INFO1000, "DMIT::CDT:Entry:0x%p mi:0x%p\nPrevs:\n",
+ dcp, dcp->mi));
+ DebuggerMethodInfo *dmi = dcp->mi->m_prevMethodInfo;
+
+ while(dmi != NULL)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "\t0x%p\n", dmi));
+ dmi = dmi->m_prevMethodInfo;
+ }
+ dmi = dcp->mi->m_nextMethodInfo;
+
+ LOG((LF_CORDB, LL_INFO1000, "Nexts:\n", dmi));
+ while(dmi != NULL)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "\t0x%p\n", dmi));
+ dmi = dmi->m_nextMethodInfo;
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "DMIT::CDT:DONE\n",
+ dcp, dcp->mi));
+ }
+ }
+
+ if (m_piBuckets == 0)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DMIT::CDT: The table is officially empty!\n"));
+ return cOfficial;
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "DMIT::CDT:Looking for official entries:\n"));
+
+ ULONG iNext = m_piBuckets[0];
+ ULONG iBucket = 1;
+ HASHENTRY *psEntry = NULL;
+ while (TRUE)
+ {
+ while (iNext != UINT32_MAX)
+ {
+ cOfficial++;
+
+ psEntry = EntryPtr(iNext);
+ dcp = ((DebuggerMethodInfoEntry *)psEntry);
+
+ LOG((LF_CORDB, LL_INFO1000, "\tEntry:0x%p mi:0x%p @idx:0x%x @bucket:0x%x\n",
+ dcp, dcp->mi, iNext, iBucket));
+
+ iNext = psEntry->iNext;
+ }
+
+ // Advance to the next bucket.
+ if (iBucket < m_iBuckets)
+ iNext = m_piBuckets[iBucket++];
+ else
+ break;
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "DMIT::CDT:Finished official entries: ****************"));
+ }
+
+ return cOfficial;
+}
+#endif // _DEBUG_DMI_TABLE
+
+
+//---------------------------------------------------------------------------------------
+//
+// Class constructor for DebuggerEval. This is the supporting data structure for
+// func-eval tracking.
+//
+// Arguments:
+// pContext - The context to return to when done with this eval.
+// pEvalInfo - Contains all the important information, such as parameters, type args, method.
+// fInException - TRUE if the thread for the eval is currently in an exception notification.
+//
+DebuggerEval::DebuggerEval(CONTEXT * pContext, DebuggerIPCE_FuncEvalInfo * pEvalInfo, bool fInException)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // This must be non-zero so that the saved opcode is non-zero, and on IA64 we want it to be 0x16
+ // so that we can have a breakpoint instruction in any slot in the bundle.
+ m_breakpointInstruction[0] = 0x16;
+#if defined(_TARGET_ARM_)
+ USHORT *bp = (USHORT*)&m_breakpointInstruction;
+ *bp = CORDbg_BREAK_INSTRUCTION;
+#endif // _TARGET_ARM_
+ m_thread = pEvalInfo->vmThreadToken.GetRawPtr();
+ m_evalType = pEvalInfo->funcEvalType;
+ m_methodToken = pEvalInfo->funcMetadataToken;
+ m_classToken = pEvalInfo->funcClassMetadataToken;
+
+ // Note: we can't rely on just the DebuggerModule* or AppDomain* because the AppDomain
+ // could get unloaded between now and when the funceval actually starts. So we stash an
+ // AppDomain ID which is safe to use after the AD is unloaded. It's only safe to
+ // use the DebuggerModule* after we've verified the ADID is still valid (i.e. by entering that domain).
+ m_debuggerModule = g_pDebugger->LookupOrCreateModule(pEvalInfo->vmDomainFile);
+
+ if (m_debuggerModule == NULL)
+ {
+ // We have no associated code.
+ _ASSERTE((m_evalType == DB_IPCE_FET_NEW_STRING) || (m_evalType == DB_IPCE_FET_NEW_ARRAY));
+
+ // We'll just do the creation in whatever domain the thread is already in.
+ // It's conceivable that we might want to allow the caller to specify a specific domain, but
+ // ICorDebug provides the debugger with no way to specify the domain.
+ m_appDomainId = m_thread->GetDomain()->GetId();
+ }
+ else
+ {
+ m_appDomainId = m_debuggerModule->GetAppDomain()->GetId();
+ }
+
+ m_funcEvalKey = pEvalInfo->funcEvalKey;
+ m_argCount = pEvalInfo->argCount;
+ m_targetCodeAddr = NULL;
+ m_stringSize = pEvalInfo->stringSize;
+ m_arrayRank = pEvalInfo->arrayRank;
+ m_genericArgsCount = pEvalInfo->genericArgsCount;
+ m_genericArgsNodeCount = pEvalInfo->genericArgsNodeCount;
+ m_successful = false;
+ m_argData = NULL;
+ m_result = 0;
+ m_md = NULL;
+ m_resultType = TypeHandle();
+ m_aborting = FE_ABORT_NONE;
+ m_aborted = false;
+ m_completed = false;
+ m_evalDuringException = fInException;
+ m_rethrowAbortException = false;
+ m_retValueBoxing = Debugger::NoValueTypeBoxing;
+ m_requester = (Thread::ThreadAbortRequester)0;
+ m_vmObjectHandle = VMPTR_OBJECTHANDLE::NullPtr();
+
+ // Copy the thread's context.
+ if (pContext == NULL)
+ {
+ memset(&m_context, 0, sizeof(m_context));
+ }
+ else
+ {
+ memcpy(&m_context, pContext, sizeof(m_context));
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This constructor is only used when setting up an eval to re-abort a thread.
+//
+// Arguments:
+// pContext - The context to return to when done with this eval.
+// pThread - The thread to re-abort.
+// requester - The type of abort to throw.
+//
+DebuggerEval::DebuggerEval(CONTEXT * pContext, Thread * pThread, Thread::ThreadAbortRequester requester)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // This must be non-zero so that the saved opcode is non-zero, and on IA64 we want it to be 0x16
+ // so that we can have a breakpoint instruction in any slot in the bundle.
+ m_breakpointInstruction[0] = 0x16;
+ m_thread = pThread;
+ m_evalType = DB_IPCE_FET_RE_ABORT;
+ m_methodToken = mdMethodDefNil;
+ m_classToken = mdTypeDefNil;
+ m_debuggerModule = NULL;
+ m_funcEvalKey = RSPTR_CORDBEVAL::NullPtr();
+ m_argCount = 0;
+ m_stringSize = 0;
+ m_arrayRank = 0;
+ m_genericArgsCount = 0;
+ m_genericArgsNodeCount = 0;
+ m_successful = false;
+ m_argData = NULL;
+ m_targetCodeAddr = NULL;
+ m_result = 0;
+ m_md = NULL;
+ m_resultType = TypeHandle();
+ m_aborting = FE_ABORT_NONE;
+ m_aborted = false;
+ m_completed = false;
+ m_evalDuringException = false;
+ m_rethrowAbortException = false;
+ m_retValueBoxing = Debugger::NoValueTypeBoxing;
+ m_requester = requester;
+
+ if (pContext == NULL)
+ {
+ memset(&m_context, 0, sizeof(m_context));
+ }
+ else
+ {
+ memcpy(&m_context, pContext, sizeof(m_context));
+ }
+}
+
+
+#ifdef _DEBUG
+// Thread proc for interop stress coverage. Have an unmanaged thread
+// that just loops throwing native exceptions. This can test corner cases
+// such as getting a native exception while the runtime is synced.
+DWORD WINAPI DbgInteropStressProc(void * lpParameter)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ int i = 0;
+ int zero = 0;
+
+
+ // This will ensure that the compiler doesn't flag our 1/0 exception below at compile-time.
+ if (lpParameter != NULL)
+ {
+ zero = 1;
+ }
+
+ // Note that this thread is a non-runtime thread. So it can't take any CLR locks
+ // or do anything else that may block the helper thread.
+ // (Log statements take CLR locks).
+ while(true)
+ {
+ i++;
+
+ if ((i % 10) != 0)
+ {
+ // Generate an in-band event.
+ PAL_CPP_TRY
+ {
+ // Throw a handled exception. Don't use an AV since that's pretty special.
+ *(int*)lpParameter = 1 / zero;
+ }
+ PAL_CPP_CATCH_ALL
+ {
+ }
+ PAL_CPP_ENDTRY
+ }
+ else
+ {
+ // Generate the occasional oob-event.
+ WszOutputDebugString(W("Ping from DbgInteropStressProc"));
+ }
+
+ // This helps parallelize if we have a lot of threads, and keeps us from
+ // chewing too much CPU time.
+ ClrSleepEx(2000,FALSE);
+ ClrSleepEx(GetRandomInt(1000), FALSE);
+ }
+
+ return 0;
+}
+
+// ThreadProc that does everything in a can't stop region.
+DWORD WINAPI DbgInteropCantStopStressProc(void * lpParameter)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // This will mark us as a can't stop region.
+ ClrFlsSetThreadType (ThreadType_DbgHelper);
+
+ return DbgInteropStressProc(lpParameter);
+}
+
+// Generate lots of OOB events.
+DWORD WINAPI DbgInteropDummyStressProc(void * lpParameter)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ ClrSleepEx(1,FALSE);
+ return 0;
+}
+
+DWORD WINAPI DbgInteropOOBStressProc(void * lpParameter)
+{
+ WRAPPER_NO_CONTRACT;
+
+ int i = 0;
+ while(true)
+ {
+ i++;
+ if (i % 10 == 1)
+ {
+ // Create a dummy thread. That generates 2 oob events
+ // (1 for create, 1 for destroy)
+ DWORD id;
+ ::CreateThread(NULL, 0, DbgInteropDummyStressProc, NULL, 0, &id);
+ }
+ else
+ {
+ // Generate the occasional oob-event.
+ WszOutputDebugString(W("OOB ping from "));
+ }
+
+ ClrSleepEx(3000, FALSE);
+ }
+
+ return 0;
+}
+
+// List of the different possible stress procs.
+LPTHREAD_START_ROUTINE g_pStressProcs[] =
+{
+ DbgInteropOOBStressProc,
+ DbgInteropCantStopStressProc,
+ DbgInteropStressProc
+};
+#endif
+
+
+DebuggerHeap * Debugger::GetInteropSafeHeap()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Lazily initialize our heap.
+ if (!m_heap.IsInit())
+ {
+ _ASSERTE(!"InteropSafe Heap should have already been initialized in LazyInit");
+
+ // Just in case we miss it in retail, convert to OOM here:
+ ThrowOutOfMemory();
+ }
+
+ return &m_heap;
+}
+
+DebuggerHeap * Debugger::GetInteropSafeHeap_NoThrow()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Lazily initialize our heap.
+ if (!m_heap.IsInit())
+ {
+ _ASSERTE(!"InteropSafe Heap should have already been initialized in LazyInit");
+
+ // Just in case we miss it in retail, convert to OOM here:
+ return NULL;
+ }
+ return &m_heap;
+}
+
+DebuggerHeap * Debugger::GetInteropSafeExecutableHeap()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Lazily initialize our heap.
+ if (!m_executableHeap.IsInit())
+ {
+ _ASSERTE(!"InteropSafe Executable Heap should have already been initialized in LazyInit");
+
+ // Just in case we miss it in retail, convert to OOM here:
+ ThrowOutOfMemory();
+ }
+
+ return &m_executableHeap;
+}
+
+DebuggerHeap * Debugger::GetInteropSafeExecutableHeap_NoThrow()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Lazily initialize our heap.
+ if (!m_executableHeap.IsInit())
+ {
+ _ASSERTE(!"InteropSafe Executable Heap should have already been initialized in LazyInit");
+
+ // Just in case we miss it in retail, convert to OOM here:
+ return NULL;
+ }
+ return &m_executableHeap;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Notify potential debugger that the runtime has started up
+//
+//
+// Assumptions:
+// Called during startup path
+//
+// Notes:
+// If no debugger is attached, this does nothing.
+//
+//---------------------------------------------------------------------------------------
+void Debugger::RaiseStartupNotification()
+{
+ // Right-side will read this field from OOP via DAC-primitive to determine attach or launch case.
+ // We do an interlocked increment to guarantee this is an atomic memory write, and to ensure
+ // that it's flushed from any CPU cache into memory.
+ InterlockedIncrement(&m_fLeftSideInitialized);
+
+ // If we are remote debugging, don't send the event now if a debugger is not attached. No one will be
+ // listening, and we will fail. However, we still want to initialize the variable above.
+ BOOL fRaiseStartupNotification = TRUE;
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(useTransport)
+ {
+ fRaiseStartupNotification = (CORDebuggerAttached() ? TRUE : FALSE);
+ }
+#endif
+ if (fRaiseStartupNotification)
+ {
+ DebuggerIPCEvent startupEvent;
+ InitIPCEvent(&startupEvent, DB_IPCE_LEFTSIDE_STARTUP, NULL, VMPTR_AppDomain::NullPtr());
+
+ SendRawEvent(&startupEvent);
+
+ // RS will set flags from OOP while we're stopped at the event if it wants to attach.
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Sends a raw managed debug event to the debugger.
+//
+// Arguments:
+// pManagedEvent - managed debug event
+//
+//
+// Notes:
+// This can be called even if a debugger is not attached.
+// The entire process will get frozen by the debugger once we send. The debugger
+// needs to resume the process. It may detach as well.
+// See code:IsEventDebuggerNotification for decoding this event. These methods must stay in sync.
+// The debugger process reads the events via code:CordbProcess.CopyManagedEventFromTarget.
+//
+//---------------------------------------------------------------------------------------
+void Debugger::SendRawEvent(const DebuggerIPCEvent * pManagedEvent)
+{
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(useTransport)
+ {
+ HRESULT hr = g_pDbgTransport->SendDebugEvent(const_cast<DebuggerIPCEvent *>(pManagedEvent));
+
+ if (FAILED(hr))
+ {
+ _ASSERTE(!"Failed to send debugger event");
+
+ STRESS_LOG1(LF_CORDB, LL_INFO1000, "D::SendIPCEvent Error on Send with 0x%x\n", hr);
+ UnrecoverableError(hr,
+ 0,
+ FILE_DEBUG,
+ LINE_DEBUG,
+ false);
+
+ // @dbgtodo Mac - what can we do here?
+ }
+ }
+ else
+ {
+#endif
+ // We get to send an array of ULONG_PTRs as data with the notification.
+ // The debugger can then use ReadProcessMemory to read through this array.
+ ULONG_PTR rgData [] = {
+ CLRDBG_EXCEPTION_DATA_CHECKSUM,
+ (ULONG_PTR) g_pMSCorEE,
+ (ULONG_PTR) pManagedEvent
+ };
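+ // (Roughly speaking: rgData[0] is a checksum the debugger verifies, rgData[1] lets it locate
+ // the runtime image, and rgData[2] is the address it reads the event payload from.)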
+
+ // If no debugger attached, then don't bother raising a 1st-chance exception because nobody will sniff it.
+ // @dbgtodo iDNA: in iDNA case, the recorder may sniff it.
+ if (!IsDebuggerPresent())
+ {
+ return;
+ }
+
+ //
+ // Physically send the event via an OS Exception. We're using exceptions as a notification
+ // mechanism on top of the OS native debugging pipeline.
+ // @dbgtodo cross-plat - this needs to be cross-plat.
+ //
+ EX_TRY
+ {
+ const DWORD dwFlags = 0; // continuable (eg, Debugger can continue GH)
+ RaiseException(CLRDBG_NOTIFICATION_EXCEPTION_CODE, dwFlags, NumItems(rgData), rgData);
+
+ // If debugger continues "GH" (DBG_CONTINUE), then we land here.
+ // This is the expected path for a well-behaved ICorDebug debugger.
+ }
+ EX_CATCH
+ {
+ // If no debugger is attached, or if the debugger continues "GN" (DBG_EXCEPTION_NOT_HANDLED), then we land here.
+ // A naive (not-ICorDebug aware) native-debugger won't handle the exception and so land us here.
+ // We may also get here if a debugger detaches at the Exception notification
+ // (and thus implicitly continues GN).
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+}
+
+//---------------------------------------------------------------------------------------
+// Send a createProcess event to give the RS a chance to do SetDesiredNGENFlags
+//
+// Arguments:
+// pDbgLockHolder - lock holder.
+//
+// Assumptions:
+// Lock is initially held. This will toggle the lock to send an IPC event.
+// This will start a synchronization.
+//
+// Notes:
+// In V2, this also gives the RS a chance to initialize the IPC protocol.
+// Specifically, this needs to be sent before the LS can send a sync-complete.
+//---------------------------------------------------------------------------------------
+void Debugger::SendCreateProcess(DebuggerLockHolder * pDbgLockHolder)
+{
+ pDbgLockHolder->Release();
+
+ // Encourage helper thread to spin up so that we're in a consistent state.
+ PollWaitingForHelper();
+
+ // we don't need to use SENDIPCEVENT_BEGIN/END macros that perform the debug-suspend aware checks,
+ // as this code executes on the startup path...
+ SENDIPCEVENT_RAW_BEGIN(pDbgLockHolder);
+
+ // Send a CreateProcess event.
+ // @dbgtodo pipeline - eliminate these reasons for needing a CreateProcess event (part of pipeline feature crew)
+ // This will let the RS know that the IPC block is up + ready, and then the RS can read it.
+ // The RS will then update the DCB with enough information so that we can send the sync-complete.
+ // (such as letting us know whether we're interop-debugging or not).
+ DebuggerIPCEvent event;
+ InitIPCEvent(&event, DB_IPCE_CREATE_PROCESS, NULL, VMPTR_AppDomain::NullPtr());
+ SendRawEvent(&event);
+
+ // @dbgtodo inspection- it doesn't really make sense to sync on a CreateProcess. We only have 1 thread
+ // in the CLR and we know exactly what state we're in and we can ensure that we're synchronized.
+ // For V3, the RS should be able to treat a CreateProcess as already synchronized.
+ // Remove this in V3 as we make SetDesiredNgenFlags operate OOP.
+ TrapAllRuntimeThreads();
+
+ // Must have a thread object so that we ensure that we will actually block here.
+ // This ensures the debuggee is actually stopped at startup, and
+ // this gives the debugger a chance to call SetDesiredNGENFlags before we
+ // set s_fCanChangeNgenFlags to FALSE.
+ _ASSERTE(GetThread() != NULL);
+ SENDIPCEVENT_RAW_END;
+
+ pDbgLockHolder->Acquire();
+}
+
+#ifdef FEATURE_CORECLR
+HANDLE g_hContinueStartupEvent = NULL;
+
+CLR_ENGINE_METRICS g_CLREngineMetrics = {
+ sizeof(CLR_ENGINE_METRICS),
+ CorDebugVersion_4_0,
+ &g_hContinueStartupEvent};
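+// An out-of-process debugger is expected to read this exported structure from the target
+// (e.g. via the EnumerateCLRs path mentioned below) to discover the supported debugging
+// version and the continue-startup event handle.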
+
+
+bool IsTelestoDebugPackInstalled()
+{
+#ifdef FEATURE_PAL
+ return false;
+#else
+ RegKeyHolder hKey;
+ if (ERROR_SUCCESS != WszRegOpenKeyEx(HKEY_LOCAL_MACHINE, FRAMEWORK_REGISTRY_KEY_W, 0, KEY_READ, &hKey))
+ return false;
+
+ bool debugPackInstalled = false;
+
+ DWORD cbValue = 0;
+
+ if (ERROR_SUCCESS == WszRegQueryValueEx(hKey, CLRConfig::EXTERNAL_DbgPackShimPath, NULL, NULL, NULL, &cbValue))
+ {
+ if (cbValue != 0)
+ {
+ debugPackInstalled = true;
+ }
+ }
+
+ // RegCloseKey called by holder
+ return debugPackInstalled;
+#endif // FEATURE_PAL
+}
+
+
+#define StartupNotifyEventNamePrefix W("TelestoStartupEvent_")
+const int cchEventNameBufferSize = sizeof(StartupNotifyEventNamePrefix)/sizeof(WCHAR) + 8; // + hex DWORD (8). NULL terminator is included in sizeof(StartupNotifyEventNamePrefix)
+HANDLE OpenStartupNotificationEvent()
+{
+ DWORD debuggeePID = GetCurrentProcessId();
+ WCHAR szEventName[cchEventNameBufferSize];
+ swprintf_s(szEventName, cchEventNameBufferSize, StartupNotifyEventNamePrefix W("%08x"), debuggeePID);
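+ // For a process id of, say, 0x00001a2b (illustrative only), this produces the event name
+ // "TelestoStartupEvent_00001a2b".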
+
+ return WszOpenEvent(EVENT_ALL_ACCESS, FALSE, szEventName);
+}
+
+void NotifyDebuggerOfTelestoStartup()
+{
+ // Create the continue event first so that we guarantee that any
+ // enumeration of this process will get back a valid continue event
+ // the instant we signal the startup notification event.
+
+ CONSISTENCY_CHECK(NULL == g_hContinueStartupEvent);
+ g_hContinueStartupEvent = WszCreateEvent(NULL, TRUE, FALSE, NULL);
+ CONSISTENCY_CHECK(INVALID_HANDLE_VALUE != g_hContinueStartupEvent); // we reserve this value for error conditions in EnumerateCLRs
+
+ HANDLE startupEvent = OpenStartupNotificationEvent();
+ if (startupEvent != NULL)
+ {
+ // signal notification event
+ SetEvent(startupEvent);
+ CloseHandle(startupEvent);
+ startupEvent = NULL;
+
+ // wait on continue startup event
+ // The debugger may attach to us while we're blocked here.
+ WaitForSingleObject(g_hContinueStartupEvent, INFINITE);
+ }
+
+ CloseHandle(g_hContinueStartupEvent);
+ g_hContinueStartupEvent = NULL;
+}
+#endif // FEATURE_CORECLR
+
+//---------------------------------------------------------------------------------------
+//
+// Initialize Left-Side debugger object
+//
+// Return Value:
+// S_OK on success. May also throw.
+//
+// Assumptions:
+// This is called in the startup path.
+//
+// Notes:
+// Startup initializes any necessary debugger objects, including creating
+// and starting the Runtime Controller thread. Once the RC thread is started
+// and we return successfully, the Debugger object can expect to have its
+// event handlers called.
+//
+//---------------------------------------------------------------------------------------
+HRESULT Debugger::Startup(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ _ASSERTE(g_pEEInterface != NULL);
+
+#if defined(FEATURE_CORECLR) && !defined(FEATURE_PAL)
+ if (IsWatsonEnabled() || IsTelestoDebugPackInstalled())
+ {
+ // If the debug pack is installed or Watson support is enabled, go through the telesto debugging pipeline.
+ LOG((LF_CORDB, LL_INFO10, "Debugging service is enabled because the debug pack is installed or Watson support is enabled.\n"));
+
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif
+ // This may block while an attach occurs.
+ NotifyDebuggerOfTelestoStartup();
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif
+ }
+ else
+ {
+ // On Windows, it's actually safe to finish the initialization here even without the debug pack.
+ // However, doing so causes a perf regression because we used to bail out early if the debug
+ // pack is not installed.
+ //
+ // Unlike Windows, we can't continue executing this function if the debug pack is not installed.
+ // The transport requires the debug pack to be present. Otherwise it'll raise a fatal error.
+ return S_FALSE;
+ }
+#endif // FEATURE_CORECLR && !FEATURE_PAL
+
+ DebuggerLockHolder dbgLockHolder(this);
+
+
+ // Stubs in Stacktraces are always enabled.
+ g_EnableSIS = true;
+
+ // We can get extra Interop-debugging test coverage by having some auxiliary unmanaged
+ // threads running and throwing debug events. Keep these stress procs separate so that
+ // we can focus on certain problem areas.
+#ifdef _DEBUG
+
+ g_DbgShouldntUseDebugger = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgNoDebugger) != 0;
+
+
+ // Creates random thread procs.
+ DWORD dwRegVal = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgExtraThreads);
+ DWORD dwId;
+ DWORD i;
+
+ if (dwRegVal > 0)
+ {
+ for(i = 0; i < dwRegVal; i++)
+ {
+ int iProc = GetRandomInt(NumItems(g_pStressProcs));
+ LPTHREAD_START_ROUTINE pStartRoutine = g_pStressProcs[iProc];
+ ::CreateThread(NULL, 0, pStartRoutine, NULL, 0, &dwId);
+ LOG((LF_CORDB, LL_INFO1000, "Created random thread (%d) with tid=0x%x\n", i, dwId));
+ }
+ }
+
+ dwRegVal = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgExtraThreadsIB);
+ if (dwRegVal > 0)
+ {
+ for(i = 0; i < dwRegVal; i++)
+ {
+ ::CreateThread(NULL, 0, DbgInteropStressProc, NULL, 0, &dwId);
+ LOG((LF_CORDB, LL_INFO1000, "Created extra thread (%d) with tid=0x%x\n", i, dwId));
+ }
+ }
+
+ dwRegVal = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgExtraThreadsCantStop);
+ if (dwRegVal > 0)
+ {
+ for(i = 0; i < dwRegVal; i++)
+ {
+ ::CreateThread(NULL, 0, DbgInteropCantStopStressProc, NULL, 0, &dwId);
+ LOG((LF_CORDB, LL_INFO1000, "Created extra thread 'can't-stop' (%d) with tid=0x%x\n", i, dwId));
+ }
+ }
+
+ dwRegVal = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgExtraThreadsOOB);
+ if (dwRegVal > 0)
+ {
+ for(i = 0; i < dwRegVal; i++)
+ {
+ ::CreateThread(NULL, 0, DbgInteropOOBStressProc, NULL, 0, &dwId);
+ LOG((LF_CORDB, LL_INFO1000, "Created extra thread OOB (%d) with tid=0x%x\n", i, dwId));
+ }
+ }
+#endif
+
+
+ // Lazily initialize the interop-safe heap
+
+ // Must be done before the RC thread is initialized.
+ // @dbgtodo - In V2, LS was lazily initialized; but was eagerly pre-initialized if launched by debugger.
+ // (This was for perf reasons). But we don't want Launch vs. Attach checks in the LS, so we now always
+ // init. As we move more to OOP, this init will become cheaper.
+ {
+ LazyInit();
+ DebuggerController::Initialize();
+ }
+
+ InitializeHijackFunctionAddress();
+
+ // Create the runtime controller thread, a.k.a, the debug helper thread.
+ // Don't use the interop-safe heap b/c we don't want to lazily create it.
+ m_pRCThread = new DebuggerRCThread(this);
+
+ _ASSERTE(m_pRCThread != NULL); // throws on oom
+ TRACE_ALLOC(m_pRCThread);
+
+ hr = m_pRCThread->Init();
+
+ _ASSERTE(SUCCEEDED(hr)); // throws on error
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(useTransport)
+ {
+ // The in-process DAC for Mac is lazily initialized when we get the first DDMessage.
+ // We check whether the DAC and the runtime have matching versions when we do the initialization, and
+ // we'll fail if the versions don't match. That's why we don't want to do the initialization here because
+ // even if we have the wrong version of DAC, managed apps can still run. We just can't debug it.
+
+ // Create transport control block and initialize it.
+ g_pDbgTransport = new DbgTransportSession();
+ hr = g_pDbgTransport->Init(m_pRCThread->GetDCB(), m_pAppDomainCB, &m_inProcDac);
+ if (FAILED(hr))
+ ThrowHR(hr);
+
+ // Create interface to talk to debugger proxy and initialize it.
+ DbgTransportProxy *pProxy = new DbgTransportProxy();
+ hr = pProxy->Init(g_pDbgTransport->GetPort());
+ if (FAILED(hr))
+ ThrowHR(hr);
+
+ // Contact the debugger proxy process for this machine. This has several purposes:
+ // 1) Register this runtime instance as available for debugging.
+ // 2) Check whether a debugger is already waiting to attach to us.
+ // 3) Publish the port number we expect debugging requests to target.
+ // The following call blocks until we receive a reply from the proxy or time out.
+ DbgProxyResult result = pProxy->RegisterWithProxy();
+ switch (result)
+ {
+ case RequestTimedOut:
+ // The proxy doesn't appear to be there, we're not debuggable as a result.
+ // To be careful (and avoid malicious types trying to connect to us even when the proxy is not up)
+ // neuter the transport so that it won't accept any connections. Ideally we'd just shutdown the
+ // debugger subsystem entirely, but this appears to be somewhat complex at this late stage.
+ g_pDbgTransport->Neuter();
+ break;
+ case RequestSuccessful:
+ // We registered with the proxy successfully. No debugger was interested in
+ // us just yet.
+ break;
+ case PendingDebuggerAttach:
+ // We registered with the proxy and found that a debugger was registered for
+ // an early attach.
+
+ // Mark this process as launched by the debugger and the debugger as attached.
+ g_CORDebuggerControlFlags |= DBCF_GENERATE_DEBUG_CODE;
+ MarkDebuggerAttachedInternal();
+
+ LazyInit();
+ DebuggerController::Initialize();
+ break;
+ default:
+ _ASSERTE(!"Unknown result code from DbgTransportSession::RegisterWithProxy()");
+ }
+
+ // The debugger no longer needs to talk with the proxy.
+ pProxy->Shutdown();
+ delete pProxy;
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ RaiseStartupNotification();
+
+ // Also initialize the AppDomainEnumerationIPCBlock
+#if defined(FEATURE_IPCMAN)
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(useTransport)
+ {
+ m_pAppDomainCB = new (nothrow) AppDomainEnumerationIPCBlock();
+ }
+ else
+ {
+#endif
+ m_pAppDomainCB = g_pIPCManagerInterface->GetAppDomainBlock();
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ }
+#endif
+#else // FEATURE_IPCMAN
+ m_pAppDomainCB = new (nothrow) AppDomainEnumerationIPCBlock();
+#endif // FEATURE_IPCMAN
+
+ if (m_pAppDomainCB == NULL)
+ {
+ LOG((LF_CORDB, LL_INFO100, "D::S: Failed to get AppDomain IPC block from IPCManager.\n"));
+ ThrowHR(E_FAIL);
+ }
+
+ hr = InitAppDomainIPC();
+ _ASSERTE(SUCCEEDED(hr)); // throws on error.
+
+ // See if we need to spin up the helper thread now, rather than later.
+ DebuggerIPCControlBlock* pIPCControlBlock = m_pRCThread->GetDCB();
+ (void)pIPCControlBlock; //prevent "unused variable" error from GCC
+
+ _ASSERTE(pIPCControlBlock != NULL);
+
+ _ASSERTE(!pIPCControlBlock->m_rightSideShouldCreateHelperThread);
+ {
+ // Create the win32 thread for the helper and let it run free.
+ hr = m_pRCThread->Start();
+
+ // convert failure to exception as with old contract
+ if (FAILED(hr))
+ {
+ ThrowHR(hr);
+ }
+
+ LOG((LF_CORDB, LL_EVERYTHING, "Start was successful\n"));
+ }
+
+#ifdef TEST_DATA_CONSISTENCY
+ // if we have set the environment variable TestDataConsistency, run the data consistency test.
+ // See code:DataTest::TestDataSafety for more information
+ if((g_pConfig != NULL) && (g_pConfig->TestDataConsistency() == true))
+ {
+ DataTest dt;
+ dt.TestDataSafety();
+ }
+#endif
+
+ // We don't bother changing this process's permission.
+ // A managed debugger will have the SE_DEBUG permission which will allow it to open our process handle,
+ // even if we're a guest account.
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+// Finishes startup once we have a Thread object.
+//
+// Arguments:
+// pThread - the current thread. Must be non-null
+//
+// Notes:
+// Most debugger initialization is done in code:Debugger.Startup,
+// However, debugger can't block on synchronization without a Thread object,
+// so sending IPC events must wait until after we have a thread object.
+//---------------------------------------------------------------------------------------
+HRESULT Debugger::StartupPhase2(Thread * pThread)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Must have a thread so that we can block
+ _ASSERTE(pThread != NULL);
+
+ DebuggerLockHolder dbgLockHolder(this);
+
+ // @dbgtodo - This may need to change when we remove SetupSyncEvent...
+ // If we're launching, then sync now so that the RS gets an early chance to dispatch the CreateProcess event.
+ // This is especially important b/c certain portions of the ICorDebugAPI (like setting ngen flags) are only
+ // valid during the CreateProcess callback in the launch case.
+ // We need to send the callback early enough so those APIs can set the flags before they're actually used.
+ // We also ensure the debugger is actually attached.
+ if (SUCCEEDED(hr) && CORDebuggerAttached())
+ {
+ StartCanaryThread();
+ SendCreateProcess(&dbgLockHolder); // toggles lock
+ }
+
+ // After returning from debugger startup we assume that the runtime might start using the NGEN flags to make
+ // binding decisions. From now on the debugger can not influence NGEN binding policy
+ s_fCanChangeNgenFlags = FALSE;
+
+ // Must release the lock (which would be done at the end of this method anyways) so that
+ // the helper thread can do the jit-attach.
+ dbgLockHolder.Release();
+
+
+#ifdef _DEBUG
+ // Give stress harnesses a chance to launch a managed debugger when a managed app starts up.
+ // This lets us run a set of managed apps under a debugger.
+ if (!CORDebuggerAttached())
+ {
+ #define DBG_ATTACH_ON_STARTUP_ENV_VAR W("COMPlus_DbgAttachOnStartup")
+
+ // We explicitly just check the env because we don't want a switch this invasive to be global.
+ DWORD fAttach = WszGetEnvironmentVariable(DBG_ATTACH_ON_STARTUP_ENV_VAR, NULL, 0) > 0;
+
+ if (fAttach)
+ {
+ // Remove the env var from our process so that the debugger we spin up won't inherit it.
+ // Else, if the debugger is managed, we'll have an infinite recursion.
+ BOOL fOk = WszSetEnvironmentVariable(DBG_ATTACH_ON_STARTUP_ENV_VAR, NULL);
+
+ if (fOk)
+ {
+ // We've already created the helper thread (which can service the attach request)
+ // So just do a normal jit-attach now.
+
+ SString szName(W("DebuggerStressStartup"));
+ SString szDescription(W("MDA used for debugger-stress scenario. This is fired to trigger a jit-attach")
+ W("to allow us to attach a debugger to any managed app that starts up.")
+ W("This MDA is only fired when the 'DbgAttachOnStartup' COM+ knob/reg-key is set on checked builds."));
+ SString szXML(W("<xml>See the description</xml>"));
+
+ SendMDANotification(
+ NULL, // NULL b/c we don't have a thread yet
+ &szName,
+ &szDescription,
+ &szXML,
+ ((CorDebugMDAFlags) 0 ),
+ TRUE // this will force the jit-attach
+ );
+ }
+ }
+ }
+#endif
+
+
+ return hr;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Public entrypoint into the debugger to force the lazy data to be initialized at a
+// controlled point in time. This is useful for those callers into the debugger (e.g.,
+// ETW rundown) that know they will need the lazy data initialized but cannot afford to
+// have it initialized unpredictably or inside a lock.
+//
+// This may be called more than once, and will know to initialize the lazy data only
+// once.
+//
+
+void Debugger::InitializeLazyDataIfNecessary()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (!HasLazyData())
+ {
+ DebuggerLockHolder lockHolder(this);
+ LazyInit(); // throws
+ }
+}
+
+
+/******************************************************************************
+Lazy initialize stuff once we know we are debugging.
+This reduces the startup cost in the non-debugging case.
+
+We can do this at a bunch of random strategic places.
+ ******************************************************************************/
+
+HRESULT Debugger::LazyInitWrapper()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // Do lazy initialization now.
+ EX_TRY
+ {
+ LazyInit(); // throws on errors.
+ }
+ EX_CATCH
+ {
+ Exception *_ex = GET_EXCEPTION();
+ hr = _ex->GetHR();
+ STRESS_LOG1(LF_CORDB, LL_ALWAYS, "LazyInit failed w/ hr:0x%08x\n", hr);
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return hr;
+}
+
+void Debugger::LazyInit()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(ThreadHoldsLock()); // ensure we're serialized, requires GC_NOTRIGGER
+
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ // Have knob that catches places where we lazy init.
+ _ASSERTE(!g_DbgShouldntUseDebugger);
+
+ // If we're already init, then bail.
+ if (m_pLazyData != NULL)
+ {
+ return;
+ }
+
+
+
+
+ // Lazily create our heap.
+ HRESULT hr = m_heap.Init(FALSE);
+ IfFailThrow(hr);
+
+ hr = m_executableHeap.Init(TRUE);
+ IfFailThrow(hr);
+
+ m_pLazyData = new (interopsafe) DebuggerLazyInit();
+ _ASSERTE(m_pLazyData != NULL); // throws on oom.
+
+ m_pLazyData->Init();
+
+}
+
+HelperThreadFavor::HelperThreadFavor() :
+ m_fpFavor(NULL),
+ m_pFavorData(NULL),
+ m_FavorReadEvent(NULL),
+ m_FavorLock(CrstDebuggerFavorLock, CRST_DEFAULT),
+ m_FavorAvailableEvent(NULL)
+{
+}
+
+void HelperThreadFavor::Init()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ // Create events for managing favors.
+ m_FavorReadEvent = CreateWin32EventOrThrow(NULL, kAutoResetEvent, FALSE);
+ m_FavorAvailableEvent = CreateWin32EventOrThrow(NULL, kAutoResetEvent, FALSE);
+}
+
+
+
+DebuggerLazyInit::DebuggerLazyInit() :
+ m_pPendingEvals(NULL),
+ // @TODO: a-meicht
+ // Major clean up needed for giving the right flag
+ // There are cases where DebuggerDataLock is taken by a managed thread while an unmanaged thread is also trying to take it.
+ // It could cause a deadlock if we toggle GC mode upon taking the lock.
+ // Unfortunately UNSAFE_COOPGC is not enough. There is a code path in JIT compiling where we are in GC Preemptive
+ // mode. Work around this by OR-ing in the UNSAFE_ANYMODE flag, but we really need to do a proper clean up.
+ //
+ // NOTE: If this ever gets fixed, you should replace CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT
+ // with appropriate contracts at each site.
+ //
+ m_DebuggerDataLock(CrstDebuggerJitInfo, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_REENTRANCY | CRST_DEBUGGER_THREAD)),
+ m_CtrlCMutex(NULL),
+ m_exAttachEvent(NULL),
+ m_exUnmanagedAttachEvent(NULL),
+ m_DebuggerHandlingCtrlC(NULL)
+{
+}
+
+void DebuggerLazyInit::Init()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ // Caller ensures this isn't double-called.
+
+ // This event is only used in the unmanaged attach case. We must mark this event handle as inheritable.
+ // Otherwise, the unmanaged debugger won't be able to notify us.
+ //
+ // Note that PAL currently doesn't support specifying the security attributes when creating an event, so
+ // unmanaged attach for unhandled exceptions is broken on PAL.
+ SECURITY_ATTRIBUTES* pSA = NULL;
+ SECURITY_ATTRIBUTES secAttrib;
+ secAttrib.nLength = sizeof(secAttrib);
+ secAttrib.lpSecurityDescriptor = NULL;
+ secAttrib.bInheritHandle = TRUE;
+
+ pSA = &secAttrib;
+
+ // Create some synchronization events...
+ // these events stay signaled all the time except when an attach is in progress
+ m_exAttachEvent = CreateWin32EventOrThrow(NULL, kManualResetEvent, TRUE);
+ m_exUnmanagedAttachEvent = CreateWin32EventOrThrow(pSA, kManualResetEvent, TRUE);
+
+ m_CtrlCMutex = CreateWin32EventOrThrow(NULL, kAutoResetEvent, FALSE);
+ m_DebuggerHandlingCtrlC = FALSE;
+
+ // Let the helper thread lazy init stuff too.
+ m_RCThread.Init();
+}
+
+
+DebuggerLazyInit::~DebuggerLazyInit()
+{
+ {
+ USHORT cBlobs = m_pMemBlobs.Count();
+ void **rgpBlobs = m_pMemBlobs.Table();
+
+ for (int i = 0; i < cBlobs; i++)
+ {
+ g_pDebugger->ReleaseRemoteBuffer(rgpBlobs[i], false);
+ }
+ }
+
+ if (m_pPendingEvals)
+ {
+ DeleteInteropSafe(m_pPendingEvals);
+ m_pPendingEvals = NULL;
+ }
+
+ if (m_CtrlCMutex != NULL)
+ {
+ CloseHandle(m_CtrlCMutex);
+ }
+
+ if (m_exAttachEvent != NULL)
+ {
+ CloseHandle(m_exAttachEvent);
+ }
+
+ if (m_exUnmanagedAttachEvent != NULL)
+ {
+ CloseHandle(m_exUnmanagedAttachEvent);
+ }
+}
+
+
+//
+// RequestFavor gets the debugger helper thread to call a function. It's
+// typically called when the current thread can't call the function directly,
+// e.g, there isn't enough stack space.
+//
+// RequestFavor can be called in stack-overflow scenarios and thus explicitly
+// avoids any lazy initialization.
+// It blocks until the favor callback completes.
+//
+// Parameters:
+// fp - a non-null Favor callback function
+// pData - the parameter passed to the favor callback function. This can be any value.
+//
+// Return values:
+// S_OK if the function succeeds, else a failure HRESULT
+//
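+// Illustrative usage sketch (hypothetical callback and data, for documentation only):
+//
+//     static void MyFavor(void * pData)
+//     {
+//         // runs on the helper thread, which has its own (fresh) stack
+//     }
+//     ...
+//     hr = g_pDebugger->RequestFavor(MyFavor, &myData);
+//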
+
+HRESULT Debugger::RequestFavor(FAVORCALLBACK fp, void * pData)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_TRIGGERS;
+ PRECONDITION(fp != NULL);
+ }
+ CONTRACTL_END;
+
+ if (m_pRCThread == NULL ||
+ m_pRCThread->GetRCThreadId() == GetCurrentThreadId())
+ {
+ // Since favors are only used internally, we know that the helper should always be up and ready
+ // to handle them. Also, since favors can be used in low-stack scenarios, there's not any
+ // extra initialization needed for them.
+ _ASSERTE(!"Helper not initialized for favors.");
+ return E_UNEXPECTED;
+ }
+
+ m_pRCThread->DoFavor(fp, pData);
+ return S_OK;
+}
+
+/******************************************************************************
+// Called to set the interface that the Runtime exposes to us.
+ ******************************************************************************/
+void Debugger::SetEEInterface(EEDebugInterface* i)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // @@@
+
+ // Implements DebugInterface API
+
+ g_pEEInterface = i;
+
+}
+
+
+/******************************************************************************
+// Called to shut down the debugger. This stops the RC thread and cleans
+// the object up.
+ ******************************************************************************/
+void Debugger::StopDebugger(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Leak almost everything on process exit. The OS will clean it up anyways and trying to
+ // clean it up ourselves is just one more place we may AV / deadlock.
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ if (g_pDbgTransport != NULL)
+ {
+ g_pDbgTransport->Shutdown();
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ // Ping the helper thread to exit. This will also prevent the helper from servicing new requests.
+ if (m_pRCThread != NULL)
+ {
+ m_pRCThread->AsyncStop();
+ }
+
+
+ // Also clean up the AppDomain stuff since this is cross-process.
+ TerminateAppDomainIPC ();
+
+
+ //
+ // Tell the VM to clear out all references to the debugger before we start cleaning up,
+ // so that nothing will reference (accidentally) through the partially cleaned up debugger.
+ //
+ // NOTE: we cannot clear out g_pDebugger before the delete call because the
+ // stuff in delete (particularly deleteinteropsafe) needs to look at it.
+ //
+ g_pEEInterface->ClearAllDebugInterfaceReferences();
+ g_pDebugger = NULL;
+}
+
+
+/* ------------------------------------------------------------------------ *
+ * JIT Interface routines
+ * ------------------------------------------------------------------------ */
+
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+DebuggerMethodInfo *Debugger::CreateMethodInfo(Module *module, mdMethodDef md)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+
+ PRECONDITION(HasDebuggerDataLock());
+ }
+ CONTRACTL_END;
+
+
+ // <TODO>@todo perf: creating these on the heap is slow. We should use a
+ // pool and create them out of there since we never free them
+ // until the AD is unloaded.</TODO>
+ //
+ DebuggerMethodInfo *mi = new (interopsafe) DebuggerMethodInfo(module, md);
+ _ASSERTE(mi != NULL); // throws on oom error
+
+ TRACE_ALLOC(mi);
+
+ LOG((LF_CORDB, LL_INFO100000, "D::CreateMethodInfo module=%p, token=0x%08x, info=%p\n",
+ module, md, mi));
+
+ //
+ // Lock a mutex when changing the table.
+ //
+ //@TODO : _ASSERTE(EnC);
+ HRESULT hr;
+ hr =InsertToMethodInfoList(mi);
+
+ if (FAILED(hr))
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "IAHOL Failed!!\n"));
+ DeleteInteropSafe(mi);
+ return NULL;
+ }
+ return mi;
+
+}
+
+
+
+
+
+/******************************************************************************
+// void Debugger::JITComplete(): JITComplete is called by
+// the jit interface when the JIT completes, successfully or not.
+//
+// MethodDesc* fd: MethodDesc of the code that's been JITted
+// BYTE* newAddress: The address at which the method begins.
+// If newAddress is NULL then the JIT failed. Remember that this
+// gets called before the start address of the MethodDesc gets set,
+// and so methods like GetFunctionAddress & GetFunctionSize won't work.
+//
+// <TODO>@Todo If we're passed 0 for the newAddress param, the jit has been
+// cancelled & should be undone.</TODO>
+ ******************************************************************************/
+void Debugger::JITComplete(MethodDesc* fd, TADDR newAddress)
+{
+
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ PRECONDITION(!HasDebuggerDataLock());
+ PRECONDITION(newAddress != NULL);
+ CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_ARM_
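+ // On ARM, managed code is Thumb code; OR in the Thumb bit so the recorded code address matches how the code executes.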
+ newAddress = newAddress|THUMB_CODE;
+#endif
+
+ // @@@
+ // Can be called on managed thread only
+ // This API Implements DebugInterface
+
+ if (CORDebuggerAttached())
+ {
+ // Populate the debugger's cache of DJIs. Normally we can do this lazily,
+ // the only reason we do it here is b/c the MethodDesc is not yet officially marked as "jitted",
+ // and so we can't lazily create it yet. Furthermore, the binding operations may need the DJIs.
+ //
+ // This also gives the debugger a chance to know if new JMC methods are coming.
+ DebuggerMethodInfo * dmi = GetOrCreateMethodInfo(fd->GetModule(), fd->GetMemberDef());
+ if (dmi == NULL)
+ {
+ goto Exit;
+ }
+ DebuggerJitInfo * ji = dmi->CreateInitAndAddJitInfo(fd, newAddress);
+
+ // Bind any IL patches to the newly jitted native code.
+ HRESULT hr;
+ hr = MapAndBindFunctionPatches(ji, fd, (CORDB_ADDRESS_TYPE *)newAddress);
+ _ASSERTE(SUCCEEDED(hr));
+ }
+
+ LOG((LF_CORDB, LL_EVERYTHING, "JitComplete completed successfully\n"));
+
+Exit:
+ ;
+}
+
+/******************************************************************************
+// Get the number of fixed arguments to a function, i.e., the explicit args and the "this" pointer.
+// This does not include other implicit arguments or varargs. This is used to compute a variable ID
+// (see comment in CordbJITILFrame::ILVariableToNative for more detail)
+// fVarArg is not used when this is called by Debugger::GetAndSendJITInfo, thus it has a default value.
+// The return value is not used when this is called by Debugger::getVars.
+ ******************************************************************************/
+SIZE_T Debugger::GetArgCount(MethodDesc *fd,BOOL *fVarArg /* = NULL */)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Create a MetaSig for the given method's sig. (Easier than
+ // picking the sig apart ourselves.)
+ PCCOR_SIGNATURE pCallSig;
+ DWORD cbCallSigSize;
+
+ fd->GetSig(&pCallSig, &cbCallSigSize);
+
+ if (pCallSig == NULL)
+ {
+ // Sig should only be null if the image is corrupted. (Even for lightweight-codegen)
+ // We expect the jit+verifier to catch this, so that we never land here.
+ // But just in case ...
+ CONSISTENCY_CHECK_MSGF(false, ("Corrupted image, null sig.(%s::%s)", fd->m_pszDebugClassName, fd->m_pszDebugMethodName));
+ return 0;
+ }
+
+ MetaSig msig(pCallSig, cbCallSigSize, g_pEEInterface->MethodDescGetModule(fd), NULL, MetaSig::sigMember);
+
+ // Get the arg count.
+ UINT32 NumArguments = msig.NumFixedArgs();
+
+ // Account for the 'this' argument.
+ if (!(g_pEEInterface->MethodDescIsStatic(fd)))
+ NumArguments++;
+
+ // Is this a varargs function?
+ if (msig.IsVarArg() && fVarArg != NULL)
+ {
+ *fVarArg = true;
+ }
+
+ return NumArguments;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+
+
+
+
+/******************************************************************************
+ DebuggerJitInfo * Debugger::GetJitInfo(): GetJitInfo
+ will return a pointer to a DebuggerJitInfo. If the DJI
+ doesn't exist, or it does exist, but the method has actually
+ been pitched (and the caller wants pitched methods filtered out),
+ then we'll return NULL.
+
+ Note: This will also create a DMI if one does not exist for this DJI.
+
+ MethodDesc* fd: MethodDesc for the method we're interested in.
+ CORDB_ADDRESS_TYPE * pbAddr: Address within the code, to indicate which
+ version we want. If this is NULL, then we want the
+ head of the DebuggerJitInfo list, whether it's been
+ JITted or not.
+ ******************************************************************************/
+
+
+// Get a DJI from an address.
+DebuggerJitInfo *Debugger::GetJitInfoFromAddr(TADDR addr)
+{
+ WRAPPER_NO_CONTRACT;
+
+ MethodDesc *fd;
+ fd = g_pEEInterface->GetNativeCodeMethodDesc(addr);
+ _ASSERTE(fd);
+
+ return GetJitInfo(fd, (const BYTE*) addr, NULL);
+}
+
+// Get a DJI for a Native MD (MD for a native function).
+// In the EnC scenario, the MethodDesc refers to the most recent method.
+// This is very dangerous since there may be multiple versions alive at the same time.
+// This will give back the wrong DJI if we're looking for a stale method desc.
+// @todo - can a caller possibly use this correctly?
+DebuggerJitInfo *Debugger::GetLatestJitInfoFromMethodDesc(MethodDesc * pMethodDesc)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pMethodDesc != NULL);
+ // We'd love to assert that we're jitted; but since this may be in the JitComplete
+ // callback path, we can't be sure.
+
+ return GetJitInfoWorker(pMethodDesc, NULL, NULL);
+}
+
+
+DebuggerJitInfo *Debugger::GetJitInfo(MethodDesc *fd, const BYTE *pbAddr, DebuggerMethodInfo **pMethInfo )
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(!g_pDebugger->HasDebuggerDataLock());
+ }
+ CONTRACTL_END;
+
+ // Address should be non-null and in range of MethodDesc. This lets us tell which EnC version.
+ _ASSERTE(pbAddr != NULL);
+
+ return GetJitInfoWorker(fd, pbAddr, pMethInfo);
+
+}
+
+// Internal worker to GetJitInfo. Doesn't validate parameters.
+DebuggerJitInfo *Debugger::GetJitInfoWorker(MethodDesc *fd, const BYTE *pbAddr, DebuggerMethodInfo **pMethInfo)
+{
+
+ DebuggerMethodInfo *dmi = NULL;
+ DebuggerJitInfo *dji = NULL;
+
+ // If we have a null MethodDesc - we're not going to get a jit-info. Do this check once at the top
+ // rather than littered throughout the rest of this function.
+ if (fd == NULL)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "Debugger::GetJitInfo, addr=0x%p - null fd - returning null\n", pbAddr));
+ return NULL;
+ }
+ else
+ {
+ CONSISTENCY_CHECK_MSGF(!fd->IsWrapperStub(), ("Can't get Jit-info for wrapper MDesc,'%s'", fd->m_pszDebugMethodName));
+ }
+
+ // The debugger doesn't track Lightweight-codegen methods b/c they have no metadata.
+ if (fd->IsDynamicMethod())
+ {
+ return NULL;
+ }
+
+
+ // initialize our out param
+ if (pMethInfo)
+ {
+ *pMethInfo = NULL;
+ }
+
+ LOG((LF_CORDB, LL_EVERYTHING, "Debugger::GetJitInfo called\n"));
+ // CHECK_DJI_TABLE_DEBUGGER;
+
+ // Find the DJI via the DMI
+ //
+ // One way to improve the perf, both in terms of memory usage, number of allocations
+ // and lookup speeds would be to have the first JitInfo inline in the MethodInfo
+ // struct. After all, we never want to have a MethodInfo in the table without an
+ // associated JitInfo, and this should bring us back very close to the old situation
+ // in terms of perf. But correctness comes first, and perf later...
+ // CHECK_DMI_TABLE;
+ dmi = GetOrCreateMethodInfo(fd->GetModule(), fd->GetMemberDef());
+
+ if (dmi == NULL)
+ {
+ // If we can't create the DMI, we won't be able to create the DJI.
+ return NULL;
+ }
+
+
+ // This may take the lock and lazily create an entry, so we do it up front.
+ dji = dmi->GetLatestJitInfo(fd);
+
+
+ DebuggerDataLockHolder debuggerDataLockHolder(this);
+
+ // Note the call to GetLatestJitInfo() will lazily create the first DJI if we don't already have one.
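+ // Walk the chain of DebuggerJitInfos (linked via m_prevJitInfo) looking for the entry
+ // whose MethodDesc matches fd.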
+ for (; dji != NULL; dji = dji->m_prevJitInfo)
+ {
+ if (PTR_TO_TADDR(dji->m_fd) == PTR_HOST_TO_TADDR(fd))
+ {
+ break;
+ }
+ }
+ LOG((LF_CORDB, LL_INFO1000, "D::GJI: for md:0x%x (%s::%s), got dmi:0x%x.\n",
+ fd, fd->m_pszDebugClassName, fd->m_pszDebugMethodName,
+ dmi));
+
+
+
+
+ // Log stuff - fd may be null; so we don't want to AV in the log.
+
+ LOG((LF_CORDB, LL_INFO1000, "D::GJI: for md:0x%x (%s::%s), got dmi:0x%x, dji:0x%x, latest dji:0x%x, latest fd:0x%x, prev dji:0x%x\n",
+ fd, fd->m_pszDebugClassName, fd->m_pszDebugMethodName,
+ dmi, dji, (dmi ? dmi->GetLatestJitInfo_NoCreate() : 0),
+ ((dmi && dmi->GetLatestJitInfo_NoCreate()) ? dmi->GetLatestJitInfo_NoCreate()->m_fd:0),
+ (dji?dji->m_prevJitInfo:0)));
+
+ if ((dji != NULL) && (pbAddr != NULL))
+ {
+ dji = dji->GetJitInfoByAddress(pbAddr);
+
+ // XXX Microsoft - dac doesn't support stub tracing
+ // so this just results in not-impl exceptions.
+#ifndef DACCESS_COMPILE
+ if (dji == NULL) //may have been given address of a thunk
+ {
+ LOG((LF_CORDB,LL_INFO1000,"Couldn't find a DJI by address 0x%p, "
+ "so it might be a stub or thunk\n", pbAddr));
+ TraceDestination trace;
+
+ g_pEEInterface->TraceStub((const BYTE *)pbAddr, &trace);
+
+ if ((trace.GetTraceType() == TRACE_MANAGED) && (pbAddr != (const BYTE *)trace.GetAddress()))
+ {
+ LOG((LF_CORDB,LL_INFO1000,"Address thru thunk"
+ ": 0x%p\n", trace.GetAddress()));
+ dji = GetJitInfo(fd, dac_cast<PTR_CBYTE>(trace.GetAddress()));
+ }
+#ifdef LOGGING
+ else
+ {
+ _ASSERTE(trace.GetTraceType() != TRACE_UNJITTED_METHOD ||
+ (fd == trace.GetMethodDesc()));
+ LOG((LF_CORDB,LL_INFO1000,"Address not thunked - "
+ "must be to unJITted method, or normal managed "
+ "method lacking a DJI!\n"));
+ }
+#endif //LOGGING
+ }
+#endif // #ifndef DACCESS_COMPILE
+ }
+
+ if (pMethInfo)
+ {
+ *pMethInfo = dmi;
+ }
+
+ // DebuggerDataLockHolder out of scope - release implied
+
+ return dji;
+}
+
+DebuggerMethodInfo *Debugger::GetOrCreateMethodInfo(Module *pModule, mdMethodDef token)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ SUPPORTS_DAC;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DebuggerMethodInfo *info = NULL;
+
+ // When dump debugging, we don't expect to have a lock,
+ // nor would it be useful for anything.
+ ALLOW_DATATARGET_MISSING_MEMORY(
+ // In case we don't have already, take it now.
+ DebuggerDataLockHolder debuggerDataLockHolder(this);
+ );
+
+ if (m_pMethodInfos != NULL)
+ {
+ info = m_pMethodInfos->GetMethodInfo(pModule, token);
+ }
+
+ // dac checks ngen'ed image content first, so
+ // if we didn't find information it doesn't exist.
+#ifndef DACCESS_COMPILE
+ if (info == NULL)
+ {
+ info = CreateMethodInfo(pModule, token);
+
+ LOG((LF_CORDB, LL_INFO1000, "D::GOCMI: created DMI for mdToken:0x%x, dmi:0x%x\n",
+ token, info));
+ }
+#endif // #ifndef DACCESS_COMPILE
+
+
+ if (info == NULL)
+ {
+ // This should only happen in an oom scenario. It would be nice to throw here.
+ STRESS_LOG2(LF_CORDB, LL_EVERYTHING, "OOM - Failed to allocate DJI (0x%p, 0x%x)\n", pModule, token);
+ }
+
+ // DebuggerDataLockHolder out of scope - release implied
+ return info;
+}
+
+
+#ifndef DACCESS_COMPILE
+
+/******************************************************************************
+ * GetILToNativeMapping returns a map from IL offsets to native
+ * offsets for this code. An array of COR_PROF_IL_TO_NATIVE_MAP
+ * structs will be returned, and some of the ilOffsets in this array
+ * may be the values specified in CorDebugIlToNativeMappingTypes.
+ ******************************************************************************/
+HRESULT Debugger::GetILToNativeMapping(MethodDesc *pMD, ULONG32 cMap,
+ ULONG32 *pcMap, COR_DEBUG_IL_TO_NATIVE_MAP map[])
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ }
+ CONTRACTL_END;
+
+#ifdef PROFILING_SUPPORTED
+ // At this point, we're pulling in the debugger.
+ if (!HasLazyData())
+ {
+ DebuggerLockHolder lockHolder(this);
+ LazyInit(); // throws
+ }
+
+ // Get the JIT info by functionId.
+
+ // This function is unsafe to use during EnC because the MethodDesc doesn't tell
+ // us which version is being requested.
+ // However, this function is only used by the profiler, and you can't profile with EnC,
+ // which means that getting the latest jit-info is still correct.
+#if defined(PROFILING_SUPPORTED)
+ _ASSERTE(CORProfilerPresent());
+#endif // PROFILING_SUPPORTED
+
+ DebuggerJitInfo *pDJI = GetLatestJitInfoFromMethodDesc(pMD);
+
+ // Dunno what went wrong
+ if (pDJI == NULL)
+ return (E_FAIL);
+
+ // If they gave us space to copy into...
+ if (map != NULL)
+ {
+ // Only copy as much as either they gave us or we have to copy.
+ ULONG32 cpyCount = min(cMap, pDJI->GetSequenceMapCount());
+
+ // Read the map right out of the Left Side.
+ if (cpyCount > 0)
+ ExportILToNativeMap(cpyCount,
+ map,
+ pDJI->GetSequenceMap(),
+ pDJI->m_sizeOfCode);
+ }
+
+ // Return the true count of entries
+ if (pcMap)
+ {
+ *pcMap = pDJI->GetSequenceMapCount();
+ }
+
+ return (S_OK);
+#else
+ return E_NOTIMPL;
+#endif
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// This is morally the same as GetILToNativeMapping, except the output is in a different
+// format, to better facilitate sending the ETW ILToNativeMap events.
+//
+// Arguments:
+// pMD - MethodDesc whose IL-to-native map will be returned
+// cMapMax - Max number of map entries to return. Although
+// this function handles the allocation of the returned
+// array, the caller still wants to limit how big this
+// can get, since ETW itself has limits on how big
+// events can get
+// pcMap - [out] Number of entries returned in each output parallel array (next
+// two parameters).
+// prguiILOffset - [out] Array of IL offsets. This function allocates, caller must free.
+// prguiNativeOffset - [out] Array of the starting native offsets that correspond
+// to each (*prguiILOffset)[i]. This function allocates,
+// caller must free.
+//
+// Return Value:
+// HRESULT indicating success or failure.
+//
+// Notes:
+// * This function assumes lazy data has already been initialized (in order to
+// ensure that this doesn't trigger or take the large debugger mutex). So
+// callers must guarantee they call InitializeLazyDataIfNecessary() first.
+// * Either this function fails, and (*prguiILOffset) & (*prguiNativeOffset) will be
+// untouched OR this function succeeds and (*prguiILOffset) & (*prguiNativeOffset)
+// will both be non-NULL, set to the parallel arrays this function allocated.
+// * If this function returns success, then the caller must free (*prguiILOffset) and
+// (*prguiNativeOffset)
+// * (*prguiILOffset) and (*prguiNativeOffset) are parallel arrays, such that
+// (*prguiILOffset)[i] corresponds to (*prguiNativeOffset)[i] for each 0 <= i < *pcMap
+// * If EnC is enabled, this function will return the IL-to-native mapping for the latest
+// EnC version of the function. This may not be what the profiler wants, but EnC
+// + ETW-map events is not a typical combination, and this is consistent with
+// other ETW events like JittingStarted or MethodLoad, which also fire multiple
+// events for the same MethodDesc (each time it's EnC'd), with each event
+// corresponding to the most recent EnC version at the time.
+//
+
+HRESULT Debugger::GetILToNativeMappingIntoArrays(
+ MethodDesc * pMD,
+ USHORT cMapMax,
+ USHORT * pcMap,
+ UINT ** prguiILOffset,
+ UINT ** prguiNativeOffset)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pcMap != NULL);
+ _ASSERTE(prguiILOffset != NULL);
+ _ASSERTE(prguiNativeOffset != NULL);
+
+ // Any caller of GetILToNativeMappingIntoArrays had better call
+ // InitializeLazyDataIfNecessary first!
+ _ASSERTE(HasLazyData());
+
+ // Get the JIT info by functionId.
+
+ DebuggerJitInfo * pDJI = GetLatestJitInfoFromMethodDesc(pMD);
+
+ // Dunno what went wrong
+ if (pDJI == NULL)
+ return E_FAIL;
+
+ ULONG32 cMap = min(cMapMax, pDJI->GetSequenceMapCount());
+ DebuggerILToNativeMap * rgMapInt = pDJI->GetSequenceMap();
+
+ NewArrayHolder<UINT> rguiILOffsetTemp = new (nothrow) UINT[cMap];
+ if (rguiILOffsetTemp == NULL)
+ return E_OUTOFMEMORY;
+
+ NewArrayHolder<UINT> rguiNativeOffsetTemp = new (nothrow) UINT[cMap];
+ if (rguiNativeOffsetTemp == NULL)
+ return E_OUTOFMEMORY;
+
+ for (ULONG32 iMap=0; iMap < cMap; iMap++)
+ {
+ rguiILOffsetTemp[iMap] = rgMapInt[iMap].ilOffset;
+ rguiNativeOffsetTemp[iMap] = rgMapInt[iMap].nativeStartOffset;
+ }
+
+ // Since cMap is the min of cMapMax (and something else) and cMapMax is a USHORT,
+ // then cMap must fit in a USHORT as well
+ _ASSERTE(FitsIn<USHORT>(cMap));
+ *pcMap = (USHORT) cMap;
+ *prguiILOffset = rguiILOffsetTemp.Extract();
+ *prguiNativeOffset = rguiNativeOffsetTemp.Extract();
+
+ return S_OK;
+}
+
+
+
+
+#endif // #ifndef DACCESS_COMPILE
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+CodeRegionInfo CodeRegionInfo::GetCodeRegionInfo(DebuggerJitInfo *dji, MethodDesc *md, PTR_CORDB_ADDRESS_TYPE addr)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (dji && dji->m_addrOfCode)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "CRI::GCRI: simple case\n"));
+ return dji->m_codeRegionInfo;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "CRI::GCRI: more complex case\n"));
+ CodeRegionInfo codeRegionInfo;
+
+ // Use method desc from dji if present
+ if (dji && dji->m_fd)
+ {
+ _ASSERTE(!md || md == dji->m_fd);
+ md = dji->m_fd;
+ }
+
+ if (!addr)
+ {
+ _ASSERTE(md);
+ addr = dac_cast<PTR_CORDB_ADDRESS_TYPE>(g_pEEInterface->GetFunctionAddress(md));
+ }
+ else
+ {
+ _ASSERTE(!md ||
+ (addr == dac_cast<PTR_CORDB_ADDRESS_TYPE>(g_pEEInterface->GetFunctionAddress(md))));
+ }
+
+ if (addr)
+ {
+ PCODE pCode = (PCODE)dac_cast<TADDR>(addr);
+#ifdef _TARGET_ARM_
+ pCode |= THUMB_CODE;
+#endif
+ codeRegionInfo.InitializeFromStartAddress(pCode);
+ }
+
+ return codeRegionInfo;
+ }
+}
+
+
+#ifndef DACCESS_COMPILE
+/******************************************************************************
+// Helper function for getBoundaries; works around AMD64 compiler limitations on mixing
+// contract holders and PAL_TRY in the same function.
+ ******************************************************************************/
+void Debugger::getBoundariesHelper(MethodDesc * md,
+ unsigned int *cILOffsets,
+ DWORD **pILOffsets)
+{
+ //
+ // CANNOT ADD A CONTRACT HERE. Contract is in getBoundaries
+ //
+
+ //
+ // Grab the JIT info struct for this method. Create if needed, as this
+ // may be called before JITComplete.
+ //
+ DebuggerMethodInfo *dmi = NULL;
+ dmi = GetOrCreateMethodInfo(md->GetModule(), md->GetMemberDef());
+
+ if (dmi != NULL)
+ {
+ LOG((LF_CORDB,LL_INFO10000,"De::NGB: Got dmi 0x%x\n",dmi));
+
+#if defined(FEATURE_ISYM_READER)
+ // Note: we need to make sure to enable preemptive GC here just in case we block in the symbol reader.
+ GCX_PREEMP_EEINTERFACE();
+
+ Module *pModule = md->GetModule();
+ (void)pModule; //prevent "unused variable" error from GCC
+ _ASSERTE(pModule != NULL);
+
+ SafeComHolder<ISymUnmanagedReader> pReader(pModule->GetISymUnmanagedReader());
+
+ // If we got a reader, use it.
+ if (pReader != NULL)
+ {
+ // Grab the sym reader's method.
+ ISymUnmanagedMethod *pISymMethod;
+
+ HRESULT hr = pReader->GetMethod(md->GetMemberDef(),
+ &pISymMethod);
+
+ ULONG32 n = 0;
+
+ if (SUCCEEDED(hr))
+ {
+ // Get the count of sequence points.
+ hr = pISymMethod->GetSequencePointCount(&n);
+ _ASSERTE(SUCCEEDED(hr));
+
+
+ LOG((LF_CORDB, LL_INFO100000,
+ "D::NGB: Reader seq pt count is %d\n", n));
+
+ ULONG32 *p;
+
+ if (n > 0)
+ {
+ ULONG32 dummy;
+
+ p = new ULONG32[n];
+ _ASSERTE(p != NULL); // throws on OOM error
+
+ hr = pISymMethod->GetSequencePoints(n, &dummy,
+ p, NULL, NULL, NULL,
+ NULL, NULL);
+ _ASSERTE(SUCCEEDED(hr));
+ _ASSERTE(dummy == n);
+
+ *pILOffsets = (DWORD*)p;
+
+ // Translate the IL offsets based on an
+ // instrumented IL map if one exists.
+ if (dmi->HasInstrumentedILMap())
+ {
+ InstrumentedILOffsetMapping mapping =
+ dmi->GetRuntimeModule()->GetInstrumentedILOffsetMapping(dmi->m_token);
+
+ for (SIZE_T i = 0; i < n; i++)
+ {
+ int origOffset = *p;
+
+ *p = dmi->TranslateToInstIL(
+ &mapping,
+ origOffset,
+ bOriginalToInstrumented);
+
+ LOG((LF_CORDB, LL_INFO100000,
+ "D::NGB: 0x%04x (Real IL:0x%x)\n",
+ origOffset, *p));
+
+ p++;
+ }
+ }
+#ifdef LOGGING
+ else
+ {
+ for (SIZE_T i = 0; i < n; i++)
+ {
+ LOG((LF_CORDB, LL_INFO100000,
+ "D::NGB: 0x%04x \n", *p));
+ p++;
+ }
+ }
+#endif
+ }
+ else
+ *pILOffsets = NULL;
+
+ pISymMethod->Release();
+ }
+ else
+ {
+
+ *pILOffsets = NULL;
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "De::NGB: failed to find method 0x%x in sym reader.\n",
+ md->GetMemberDef()));
+ }
+
+ *cILOffsets = n;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO100000, "D::NGB: no reader.\n"));
+ }
+
+#else // FEATURE_ISYM_READER
+ // We don't have ISymUnmanagedReader. Pretend there are no sequence points.
+ *cILOffsets = 0;
+#endif // FEATURE_ISYM_READER
+ }
+
+ LOG((LF_CORDB, LL_INFO100000, "D::NGB: cILOffsets=%d\n", *cILOffsets));
+ return;
+}
+#endif
+
+/******************************************************************************
+// Use an ISymUnmanagedReader to get method sequence points.
+ ******************************************************************************/
+void Debugger::getBoundaries(MethodDesc * md,
+ unsigned int *cILOffsets,
+ DWORD **pILOffsets,
+ ICorDebugInfo::BoundaryTypes *implicitBoundaries)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ // May be here even when a debugger is not attached.
+
+ // @@@
+ // Implements DebugInterface API
+
+ *cILOffsets = 0;
+ *pILOffsets = NULL;
+ *implicitBoundaries = ICorDebugInfo::DEFAULT_BOUNDARIES;
+ // If there has been an unrecoverable Left Side error, then we
+ // just pretend that there are no boundaries.
+ if (CORDBUnrecoverableError(this))
+ {
+ return;
+ }
+
+ // LCG methods have their own resolution scope that is separate from a module
+ // so they shouldn't have their symbols looked up in the module PDB. Right now
+ // LCG methods have no symbols so we can just early out, but if they ever
+ // had some symbols attached we would need a different way of getting to them.
+ // See Dev10 issue 728519
+ if(md->IsLCGMethod())
+ {
+ return;
+ }
+
+ // If JIT optimizations are allowed for the module this function
+ // lives in, then don't grab specific boundaries from the symbol
+ // store since any boundaries we give the JIT will be pretty much
+ // ignored anyway.
+ if (!CORDisableJITOptimizations(md->GetModule()->GetDebuggerInfoBits()))
+ {
+ *implicitBoundaries = ICorDebugInfo::BoundaryTypes(ICorDebugInfo::STACK_EMPTY_BOUNDARIES |
+ ICorDebugInfo::CALL_SITE_BOUNDARIES);
+
+ return;
+ }
+
+ Module* pModule = md->GetModule();
+ DWORD dwBits = pModule->GetDebuggerInfoBits();
+ if ((dwBits & DACF_IGNORE_PDBS) != 0)
+ {
+ //
+ // If told to explicitly ignore PDBs for this function, then bail now.
+ //
+ return;
+ }
+
+ if( !pModule->IsSymbolReadingEnabled() )
+ {
+ // Symbol reading is disabled for this module, so bail out early (for efficiency only)
+ return;
+ }
+
+ if (pModule == SystemDomain::SystemModule())
+ {
+ // We don't look up PDBs for mscorlib. This is not quite right, but avoids
+ // a bootstrapping problem. When an EXE loads, it has the option of setting
+ // the COM apartment model to STA if we need to. It is important that no
+ // other CoInitialize happens before this. Since loading the PDB reader uses
+ // COM, we cannot come first. However, managed code IS run before the COM
+ // apartment model is set, and thus we have a problem since this code is
+ // called when JITting managed code. We avoid the problem by just
+ // bailing for mscorlib.
+ return;
+ }
+
+ // At this point, we're pulling in the debugger.
+ if (!HasLazyData())
+ {
+ DebuggerLockHolder lockHolder(this);
+ LazyInit(); // throws
+ }
+
+ getBoundariesHelper(md, cILOffsets, pILOffsets);
+
+#else
+ DacNotImpl();
+#endif // #ifndef DACCESS_COMPILE
+}
+
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+void Debugger::getVars(MethodDesc * md, ULONG32 *cVars, ICorDebugInfo::ILVarInfo **vars,
+ bool *extendOthers)
+{
+#ifndef DACCESS_COMPILE
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ PRECONDITION(!ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+
+
+ // At worst return no information
+ *cVars = 0;
+ *vars = NULL;
+
+ // Just tell the JIT to extend everything.
+ // Note that if optimizations are enabled, the native compilers are
+ // free to ignore *extendOthers
+ *extendOthers = true;
+
+ DWORD bits = md->GetModule()->GetDebuggerInfoBits();
+
+ if (CORDBUnrecoverableError(this))
+ goto Exit;
+
+ if (CORDisableJITOptimizations(bits))
+// if (!CORDebuggerAllowJITOpts(bits))
+ {
+ //
+ // @TODO: Do we really need this code since *extendOthers==true?
+ //
+
+ // Is this a vararg function?
+ BOOL fVarArg = false;
+ GetArgCount(md, &fVarArg);
+
+ if (fVarArg)
+ {
+ COR_ILMETHOD *ilMethod = g_pEEInterface->MethodDescGetILHeader(md);
+
+ if (ilMethod)
+ {
+ // It is, so we need to tell the JIT to give us the
+ // varags handle.
+ ICorDebugInfo::ILVarInfo *p = new ICorDebugInfo::ILVarInfo[1];
+ _ASSERTE(p != NULL); // throws on oom error
+
+ COR_ILMETHOD_DECODER header(ilMethod);
+ unsigned int ilCodeSize = header.GetCodeSize();
+
+ p->startOffset = 0;
+ p->endOffset = ilCodeSize;
+ p->varNumber = (DWORD) ICorDebugInfo::VARARGS_HND_ILNUM;
+
+ *cVars = 1;
+ *vars = p;
+ }
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO100000, "D::gV: cVars=%d, extendOthers=%d\n",
+ *cVars, *extendOthers));
+
+Exit:
+ ;
+#else
+ DacNotImpl();
+#endif // #ifndef DACCESS_COMPILE
+}
+
+
+#ifndef DACCESS_COMPILE
+
+// If we have a varargs function, we can't set the IP (we don't know how to pack/unpack the arguments), so if we
+// call SetIP with fCanSetIPOnly = true, we need to check for that.
+// Arguments:
+// input: nEntries - number of entries in varNativeInfo
+// varNativeInfo - array of entries describing the args and locals for the function
+// output: true iff the function has varargs
+BOOL Debugger::IsVarArgsFunction(unsigned int nEntries, PTR_NativeVarInfo varNativeInfo)
+{
+ for (unsigned int i = 0; i < nEntries; ++i)
+ {
+ if (varNativeInfo[i].loc.vlType == ICorDebugInfo::VLT_FIXED_VA)
+ {
+ return TRUE;
+ }
+ }
+ return FALSE;
+}
+
+// We want to keep the 'worst' HRESULT - if one has failed (..._E_...) & the
+// other hasn't, take the failing one. If they've both/neither failed, then
+// it doesn't matter which we take.
+// Note that this macro favors retaining the first argument
+#define WORST_HR(hr1,hr2) (FAILED(hr1)?hr1:hr2)
+/******************************************************************************
+ *
+ ******************************************************************************/
+HRESULT Debugger::SetIP( bool fCanSetIPOnly, Thread *thread,Module *module,
+ mdMethodDef mdMeth, DebuggerJitInfo* dji,
+ SIZE_T offsetILTo, BOOL fIsIL)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(thread));
+ PRECONDITION(CheckPointer(module));
+ PRECONDITION(mdMeth != mdMethodDefNil);
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ static ConfigDWORD breakOnSetIP;
+ if (breakOnSetIP.val(CLRConfig::INTERNAL_DbgBreakOnSetIP)) _ASSERTE(!"DbgBreakOnSetIP");
+#endif
+
+ HRESULT hr = S_OK;
+ HRESULT hrAdvise = S_OK;
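+ // hr holds the result of the most recent operation; hrAdvise accumulates the worst
+ // result seen so far (via WORST_HR) and is what we ultimately return.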
+
+ DWORD offsetILFrom;
+ CorDebugMappingResult map;
+ DWORD whichIgnore;
+
+ ControllerStackInfo csi;
+
+ BOOL exact;
+ SIZE_T offsetNatTo;
+
+ PCODE pbDest = NULL;
+ BYTE *pbBase = NULL;
+ CONTEXT *pCtx = NULL;
+ DWORD dwSize = 0;
+ SIZE_T *rgVal1 = NULL;
+ SIZE_T *rgVal2 = NULL;
+ BYTE **pVCs = NULL;
+
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP: In SetIP ==> fCanSetIPOnly:0x%x <==!\n", fCanSetIPOnly));
+
+ if (ReJitManager::IsReJITEnabled())
+ {
+ return CORDBG_E_SET_IP_IMPOSSIBLE;
+ }
+
+ pCtx = GetManagedStoppedCtx(thread);
+
+ // If we can't get a context, then we can't possibly be a in a good place
+ // to do a setip.
+ if (pCtx == NULL)
+ {
+ return CORDBG_S_BAD_START_SEQUENCE_POINT;
+ }
+
+ // Implicit Caveat: We need to be the active frame.
+ // We can safely take a stack trace because the thread is synchronized.
+ StackTraceTicket ticket(thread);
+ csi.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL);
+
+ ULONG offsetNatFrom = csi.m_activeFrame.relOffset;
+#if defined(WIN64EXCEPTIONS)
+ if (csi.m_activeFrame.IsFuncletFrame())
+ {
+ offsetNatFrom = (ULONG)((SIZE_T)GetControlPC(&(csi.m_activeFrame.registers)) -
+ (SIZE_T)(dji->m_addrOfCode));
+ }
+#endif // WIN64EXCEPTIONS
+
+ _ASSERTE(dji != NULL);
+
+ // On WIN64 platforms, it's important to use the total size of the
+ // parent method and the funclets below (i.e. m_sizeOfCode). Don't use
+ // the size of the individual funclets or the parent method.
+ pbBase = (BYTE*)CORDB_ADDRESS_TO_PTR(dji->m_addrOfCode);
+ dwSize = (DWORD)dji->m_sizeOfCode;
+#if defined(WIN64EXCEPTIONS)
+ // Currently, method offsets are not bigger than 4 bytes even on WIN64.
+ // Assert that it is so here.
+ _ASSERTE((SIZE_T)dwSize == dji->m_sizeOfCode);
+#endif // WIN64EXCEPTIONS
+
+
+ // Create our structure for analyzing this.
+ // <TODO>@PERF: optimize - hold on to this so we don't rebuild it for both
+ // CanSetIP & SetIP.</TODO>
+ int cFunclet = 0;
+ const DWORD * rgFunclet = NULL;
+#if defined(WIN64EXCEPTIONS)
+ cFunclet = dji->GetFuncletCount();
+ rgFunclet = dji->m_rgFunclet;
+#endif // WIN64EXCEPTIONS
+
+ EHRangeTree* pEHRT = new (nothrow) EHRangeTree(csi.m_activeFrame.pIJM,
+ csi.m_activeFrame.MethodToken,
+ dwSize,
+ cFunclet,
+ rgFunclet);
+
+ // To maintain the current semantics, we will check the following right before SetIPFromSrcToDst() is called
+ // (instead of checking them now):
+ // 1) pEHRT == NULL
+ // 2) FAILED(pEHRT->m_hrInit)
+
+
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Got version info fine\n"));
+
+ // Caveat: we need to start from a sequence point
+ offsetILFrom = dji->MapNativeOffsetToIL(offsetNatFrom,
+ &map, &whichIgnore);
+ if ( !(map & MAPPING_EXACT) )
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Starting native offset is bad!\n"));
+ hrAdvise = WORST_HR(hrAdvise, CORDBG_S_BAD_START_SEQUENCE_POINT);
+ }
+ else
+ { // exact IL mapping
+
+ if (!(dji->GetSrcTypeFromILOffset(offsetILFrom) & ICorDebugInfo::STACK_EMPTY))
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Starting offset isn't stack empty!\n"));
+ hrAdvise = WORST_HR(hrAdvise, CORDBG_S_BAD_START_SEQUENCE_POINT);
+ }
+ }
+
+ // Caveat: we need to go to a sequence point
+ if (fIsIL )
+ {
+#if defined(WIN64EXCEPTIONS)
+ int funcletIndexFrom = dji->GetFuncletIndex((CORDB_ADDRESS)offsetNatFrom, DebuggerJitInfo::GFIM_BYOFFSET);
+ offsetNatTo = dji->MapILOffsetToNativeForSetIP(offsetILTo, funcletIndexFrom, pEHRT, &exact);
+#else // WIN64EXCEPTIONS
+ DebuggerJitInfo::ILToNativeOffsetIterator it;
+ dji->InitILToNativeOffsetIterator(it, offsetILTo);
+ offsetNatTo = it.CurrentAssertOnlyOne(&exact);
+#endif // WIN64EXCEPTIONS
+
+ if (!exact)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Dest (via IL offset) is bad!\n"));
+ hrAdvise = WORST_HR(hrAdvise, CORDBG_S_BAD_END_SEQUENCE_POINT);
+ }
+ }
+ else
+ {
+ offsetNatTo = offsetILTo;
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Dest of 0x%p (via native "
+ "offset) is fine!\n", offsetNatTo));
+ }
+
+ CorDebugMappingResult mapping;
+ DWORD which;
+ offsetILTo = dji->MapNativeOffsetToIL(offsetNatTo, &mapping, &which);
+
+ // We only want to perhaps return CORDBG_S_BAD_END_SEQUENCE_POINT if
+ // we're not already returning CORDBG_S_BAD_START_SEQUENCE_POINT.
+ if (hr != CORDBG_S_BAD_START_SEQUENCE_POINT)
+ {
+ if ( !(mapping & MAPPING_EXACT) )
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Ending native offset is bad!\n"));
+ hrAdvise = WORST_HR(hrAdvise, CORDBG_S_BAD_END_SEQUENCE_POINT);
+ }
+ else
+ {
+ // <NOTE WIN64>
+ // All duplicate sequence points (ones with the same IL offset) should have the same SourceTypes.
+ // </NOTE WIN64>
+ if (!(dji->GetSrcTypeFromILOffset(offsetILTo) & ICorDebugInfo::STACK_EMPTY))
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Ending offset isn't a sequence"
+ " point, or not stack empty!\n"));
+ hrAdvise = WORST_HR(hrAdvise, CORDBG_S_BAD_END_SEQUENCE_POINT);
+ }
+ }
+ }
+
+ // Once we finally have a native offset, it had better be in range.
+ if (offsetNatTo >= dwSize)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Code out of range! offsetNatTo = 0x%x, dwSize=0x%x\n", offsetNatTo, dwSize));
+ hrAdvise = E_INVALIDARG;
+ goto LExit;
+ }
+
+ pbDest = CodeRegionInfo::GetCodeRegionInfo(dji).OffsetToAddress(offsetNatTo);
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Dest is 0x%p\n", pbDest));
+
+ // Don't allow SetIP if the source or target is cold (SetIPFromSrcToDst does not
+ // correctly handle this case).
+ if (!CodeRegionInfo::GetCodeRegionInfo(dji).IsOffsetHot(offsetNatTo) ||
+ !CodeRegionInfo::GetCodeRegionInfo(dji).IsOffsetHot(offsetNatFrom))
+ {
+ hrAdvise = WORST_HR(hrAdvise, CORDBG_E_SET_IP_IMPOSSIBLE);
+ goto LExit;
+ }
+ }
+
+ if (!fCanSetIPOnly)
+ {
+ hr = ShuffleVariablesGet(dji,
+ offsetNatFrom,
+ pCtx,
+ &rgVal1,
+ &rgVal2,
+ &pVCs);
+ LOG((LF_CORDB|LF_ENC,
+ LL_INFO10000,
+ "D::SIP: rgVal1 0x%X, rgVal2 0x%X\n",
+ rgVal1,
+ rgVal2));
+
+ if (FAILED(hr))
+ {
+ // This will only fail fatally, so exit.
+ hrAdvise = WORST_HR(hrAdvise, hr);
+ goto LExit;
+ }
+ }
+ else // fCanSetIPOnly
+ {
+ if (IsVarArgsFunction(dji->GetVarNativeInfoCount(), dji->GetVarNativeInfo()))
+ {
+ hrAdvise = E_INVALIDARG;
+ goto LExit;
+ }
+ }
+
+
+ if (pEHRT == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ }
+ else if (FAILED(pEHRT->m_hrInit))
+ {
+ hr = pEHRT->m_hrInit;
+ }
+ else
+ {
+ //
+ // This is a known, ok, violation. END_EXCEPTION_GLUE has a call to GetThrowable in it, but
+ // we will never hit it because we are passing in NULL below. This is to satisfy the static
+ // contract analyzer.
+ //
+ CONTRACT_VIOLATION(GCViolation);
+
+ EX_TRY
+ {
+ hr = g_pEEInterface->SetIPFromSrcToDst(thread,
+ pbBase,
+ offsetNatFrom,
+ (DWORD)offsetNatTo,
+ fCanSetIPOnly,
+ &(csi.m_activeFrame.registers),
+ pCtx,
+ (void *)dji,
+ pEHRT);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ }
+
+ // Get the return code, if any
+ if (hr != S_OK)
+ {
+ hrAdvise = WORST_HR(hrAdvise, hr);
+ goto LExit;
+ }
+
+ // If we really want to do this, we'll have to put the
+ // variables into their new locations.
+ if (!fCanSetIPOnly && !FAILED(hrAdvise))
+ {
+ // TODO: We should zero out any registers which have now become live GC roots,
+ // but which aren't tracked variables (i.e. they are JIT temporaries). Such registers may
+ // have garbage left over in them, and we don't want the GC to try and dereference them
+ // as object references. However, we can't easily tell here which of the callee-saved regs
+ // are used in this method and therefore safe to clear.
+ //
+
+ hr = ShuffleVariablesSet(dji,
+ offsetNatTo,
+ pCtx,
+ &rgVal1,
+ &rgVal2,
+ pVCs);
+
+
+ if (hr != S_OK)
+ {
+ hrAdvise = WORST_HR(hrAdvise, hr);
+ goto LExit;
+ }
+
+ _ASSERTE(pbDest != NULL);
+
+ ::SetIP(pCtx, pbDest);
+
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Set IP to be 0x%p\n", GetIP(pCtx)));
+ }
+
+
+LExit:
+ if (rgVal1 != NULL)
+ {
+ DeleteInteropSafe(rgVal1);
+ }
+
+ if (rgVal2 != NULL)
+ {
+ DeleteInteropSafe(rgVal2);
+ }
+
+#if defined(_TARGET_X86_) || defined(_WIN64) || defined(_TARGET_ARM_)
+ if (pEHRT != NULL)
+ {
+ delete pEHRT;
+ }
+#endif // _TARGET_X86_ || _WIN64 || _TARGET_ARM_
+
+ LOG((LF_CORDB, LL_INFO1000, "D::SIP:Returning 0x%x\n", hr));
+ return hrAdvise;
+}
+
+#include "nativevaraccessors.h"
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+
+HRESULT Debugger::ShuffleVariablesGet(DebuggerJitInfo *dji,
+ SIZE_T offsetFrom,
+ CONTEXT *pCtx,
+ SIZE_T **prgVal1,
+ SIZE_T **prgVal2,
+ BYTE ***prgpVCs)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(dji));
+ PRECONDITION(CheckPointer(pCtx));
+ PRECONDITION(CheckPointer(prgVal1));
+ PRECONDITION(CheckPointer(prgVal2));
+ PRECONDITION(dji->m_sizeOfCode >= offsetFrom);
+ }
+ CONTRACTL_END;
+
+ LONG cVariables = 0;
+ DWORD i;
+
+ //
+ // Find the largest variable number
+ //
+ for (i = 0; i < dji->GetVarNativeInfoCount(); i++)
+ {
+ if ((LONG)(dji->GetVarNativeInfo()[i].varNumber) > cVariables)
+ {
+ cVariables = (LONG)(dji->GetVarNativeInfo()[i].varNumber);
+ }
+ }
+
+ HRESULT hr = S_OK;
+
+ //
+ // cVariables currently holds the largest zero-based variable number; increment it to get a count.
+ //
+ cVariables++;
+
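+ // Allocate extra slots beyond cVariables for the JIT's hidden arguments, which use
+ // negative var numbers down to UNKNOWN_ILNUM (see ShiftIndexForHiddens).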
+ SIZE_T *rgVal1 = new (interopsafe, nothrow) SIZE_T[cVariables + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM)];
+
+ SIZE_T *rgVal2 = NULL;
+
+ if (rgVal1 == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ goto LExit;
+ }
+
+ rgVal2 = new (interopsafe, nothrow) SIZE_T[cVariables + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM)];
+
+ if (rgVal2 == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ goto LExit;
+ }
+
+ memset(rgVal1, 0, sizeof(SIZE_T) * (cVariables + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM)));
+ memset(rgVal2, 0, sizeof(SIZE_T) * (cVariables + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM)));
+
+ LOG((LF_CORDB|LF_ENC,
+ LL_INFO10000,
+ "D::SVG cVariables %d, hiddens %d, rgVal1 0x%X, rgVal2 0x%X\n",
+ cVariables,
+ unsigned(-ICorDebugInfo::UNKNOWN_ILNUM),
+ rgVal1,
+ rgVal2));
+
+ GetVariablesFromOffset(dji->m_fd,
+ dji->GetVarNativeInfoCount(),
+ dji->GetVarNativeInfo(),
+ offsetFrom,
+ pCtx,
+ rgVal1,
+ rgVal2,
+ cVariables + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM),
+ prgpVCs);
+
+
+LExit:
+ if (!FAILED(hr))
+ {
+ (*prgVal1) = rgVal1;
+ (*prgVal2) = rgVal2;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO100, "D::SVG: something went wrong hr=0x%x!", hr));
+
+ (*prgVal1) = NULL;
+ (*prgVal2) = NULL;
+
+ if (rgVal1 != NULL)
+ delete[] rgVal1;
+
+ if (rgVal2 != NULL)
+ delete[] rgVal2;
+ }
+
+ return hr;
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+HRESULT Debugger::ShuffleVariablesSet(DebuggerJitInfo *dji,
+ SIZE_T offsetTo,
+ CONTEXT *pCtx,
+ SIZE_T **prgVal1,
+ SIZE_T **prgVal2,
+ BYTE **rgpVCs)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(dji));
+ PRECONDITION(CheckPointer(pCtx));
+ PRECONDITION(CheckPointer(prgVal1));
+ PRECONDITION(CheckPointer(prgVal2));
+ PRECONDITION(dji->m_sizeOfCode >= offsetTo);
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB|LF_ENC,
+ LL_INFO10000,
+ "D::SVS: rgVal1 0x%X, rgVal2 0x%X\n",
+ (*prgVal1),
+ (*prgVal2)));
+
+ HRESULT hr = SetVariablesAtOffset(dji->m_fd,
+ dji->GetVarNativeInfoCount(),
+ dji->GetVarNativeInfo(),
+ offsetTo,
+ pCtx,
+ *prgVal1,
+ *prgVal2,
+ rgpVCs);
+
+ LOG((LF_CORDB|LF_ENC,
+ LL_INFO100000,
+ "D::SVS deleting rgVal1 0x%X, rgVal2 0x%X\n",
+ (*prgVal1),
+ (*prgVal2)));
+
+ DeleteInteropSafe(*prgVal1);
+ (*prgVal1) = NULL;
+ DeleteInteropSafe(*prgVal2);
+ (*prgVal2) = NULL;
+ return hr;
+}
+
+//
+// This class is used by GetVariablesFromOffset and SetVariablesAtOffset to manage a frameHelper
+// list for the arguments and locals corresponding to each varNativeInfo. The first
+// four are hidden args, but the remainder will all have a corresponding entry
+// in the argument or local signature list.
+//
+// The structure of the array varNativeInfo contains home information for each variable
+// at various points in the function. Thus, you have to search for the proper native offset
+// (IP) in the varNativeInfo, and then find the correct varNumber in that native offset to
+// find the correct home information.
+//
+// Important to note is that the JIT has hidden args that have varNumbers that are negative.
+// Thus we cannot use varNumber as a strict index into our holder arrays, and instead shift
+// indexes before indexing into our holder arrays.
+//
+// The hidden args are a fixed-sized array given by the value of 0-UNKNOWN_ILNUM. These are used
+// to pass cookies about the arguments (var args, generics, retarg buffer etc.) to the function.
+// The real arguments and locals are as one would expect.
+//
+
+class GetSetFrameHelper
+{
+public:
+ GetSetFrameHelper();
+ ~GetSetFrameHelper();
+
+ HRESULT Init(MethodDesc* pMD);
+
+ bool GetValueClassSizeOfVar(int varNum, ICorDebugInfo::VarLocType varType, SIZE_T* pSize);
+ int ShiftIndexForHiddens(int varNum);
+
+private:
+ MethodDesc* m_pMD;
+ SIZE_T* m_rgSize;
+ CorElementType* m_rgElemType;
+ ULONG m_numArgs;
+ ULONG m_numTotalVars;
+
+ SIZE_T GetValueClassSize(MetaSig* pSig);
+
+ static SIZE_T GetSizeOfElement(CorElementType cet);
+};
+
+//
+// GetSetFrameHelper::GetSetFrameHelper()
+//
+// This is the constructor. It just initializes all member variables.
+//
+// parameters: none
+//
+// return value: none
+//
+GetSetFrameHelper::GetSetFrameHelper() : m_pMD(NULL), m_rgSize(NULL), m_rgElemType(NULL),
+ m_numArgs(0), m_numTotalVars(0)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+//
+// GetSetFrameHelper::Init()
+//
+// This method extracts the element type and the size of the arguments and locals of the method we are doing
+// the SetIP on and stores this information in instance variables.
+//
+// parameters: pMD - MethodDesc of the method we are doing the SetIP on
+//
+// return value: S_OK or E_OUTOFMEMORY
+//
+HRESULT
+GetSetFrameHelper::Init(MethodDesc *pMD)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(pMD));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ COR_ILMETHOD* pILHeader = NULL;
+ m_pMD = pMD;
+ MetaSig *pLocSig = NULL;
+ MetaSig *pArgSig = NULL;
+
+ m_rgSize = NULL;
+ m_rgElemType = NULL;
+
+ // Initialize decoderOldIL before checking the method argument signature.
+ EX_TRY
+ {
+ pILHeader = pMD->GetILHeader();
+ }
+ EX_CATCH_HRESULT(hr);
+ if (FAILED(hr))
+ return hr;
+
+ COR_ILMETHOD_DECODER decoderOldIL(pILHeader);
+ mdSignature mdLocalSig = (decoderOldIL.GetLocalVarSigTok()) ? (decoderOldIL.GetLocalVarSigTok()):
+ (mdSignatureNil);
+
+ PCCOR_SIGNATURE pCallSig;
+ DWORD cbCallSigSize;
+
+ pMD->GetSig(&pCallSig, &cbCallSigSize);
+
+ if (pCallSig != NULL)
+ {
+ // Yes, we do need to pass in the context because this might be a generic function!
+ SigTypeContext tmpContext(pMD);
+
+ pArgSig = new (interopsafe, nothrow) MetaSig(pCallSig,
+ cbCallSigSize,
+ pMD->GetModule(),
+ &tmpContext,
+ MetaSig::sigMember);
+
+ if (pArgSig == NULL)
+ {
+ IfFailGo(E_OUTOFMEMORY);
+ }
+
+ m_numArgs = pArgSig->NumFixedArgs();
+
+ if (pArgSig->HasThis())
+ {
+ m_numArgs++;
+ }
+
+ // <TODO>
+ // What should we do in this case?
+ // </TODO>
+ /*
+ if (argSig.IsVarArg())
+ m_numArgs++;
+ */
+ }
+
+ // allocation of pArgSig succeeded
+ ULONG cbSig;
+ PCCOR_SIGNATURE pLocalSig;
+ pLocalSig = NULL;
+ if (mdLocalSig != mdSignatureNil)
+ {
+ IfFailGo(pMD->GetModule()->GetMDImport()->GetSigFromToken(mdLocalSig, &cbSig, &pLocalSig));
+ }
+ if (pLocalSig != NULL)
+ {
+ SigTypeContext tmpContext(pMD);
+ pLocSig = new (interopsafe, nothrow) MetaSig(pLocalSig,
+ cbSig,
+ pMD->GetModule(),
+ &tmpContext,
+ MetaSig::sigLocalVars);
+
+ if (pLocSig == NULL)
+ {
+ IfFailGo(E_OUTOFMEMORY);
+ }
+ }
+
+ // allocation of pLocalSig succeeded
+ m_numTotalVars = m_numArgs + (pLocSig != NULL ? pLocSig->NumFixedArgs() : 0);
+
+ if (m_numTotalVars > 0)
+ {
+ m_rgSize = new (interopsafe, nothrow) SIZE_T[m_numTotalVars];
+ m_rgElemType = new (interopsafe, nothrow) CorElementType[m_numTotalVars];
+
+ if ((m_rgSize == NULL) || (m_rgElemType == NULL))
+ {
+ IfFailGo(E_OUTOFMEMORY);
+ }
+ else
+ {
+ // allocation of m_rgSize and m_rgElemType succeeded
+ for (ULONG i = 0; i < m_numTotalVars; i++)
+ {
+ // Choose the correct signature to walk.
+ MetaSig *pCur = NULL;
+ if (i < m_numArgs)
+ {
+ pCur = pArgSig;
+ }
+ else
+ {
+ pCur = pLocSig;
+ }
+
+ // The "this" argument isn't stored in the signature, so we have to
+ // check for it manually.
+ if (i == 0 && pCur->HasThis())
+ {
+ _ASSERTE(pCur == pArgSig);
+
+ m_rgElemType[i] = ELEMENT_TYPE_CLASS;
+ m_rgSize[i] = sizeof(SIZE_T);
+ }
+ else
+ {
+ m_rgElemType[i] = pCur->NextArg();
+
+ if (m_rgElemType[i] == ELEMENT_TYPE_VALUETYPE)
+ {
+ m_rgSize[i] = GetValueClassSize(pCur);
+ }
+ else
+ {
+ m_rgSize[i] = GetSetFrameHelper::GetSizeOfElement(m_rgElemType[i]);
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "GSFH::I: var 0x%x is of type %x, size:0x%x\n",
+ i, m_rgElemType[i], m_rgSize[i]));
+ }
+ }
+ } // allocation of m_rgSize and m_rgElemType succeeded
+ } // if there are variables to take care of
+
+ErrExit:
+ // clean up
+ if (pArgSig != NULL)
+ {
+ DeleteInteropSafe(pArgSig);
+ }
+
+ if (pLocSig != NULL)
+ {
+ DeleteInteropSafe(pLocSig);
+ }
+
+ if (FAILED(hr))
+ {
+ if (m_rgSize != NULL)
+ {
+ DeleteInteropSafe(m_rgSize);
+ }
+
+ if (m_rgElemType != NULL)
+ {
+ DeleteInteropSafe((int*)m_rgElemType);
+ }
+ }
+
+ return hr;
+} // GetSetFrameHelper::Init
+
+//
+// GetSetFrameHelper::~GetSetFrameHelper()
+//
+// This is the destructor. It checks the two arrays we have allocated and frees the memory accordingly.
+//
+// parameters: none
+//
+// return value: none
+//
+GetSetFrameHelper::~GetSetFrameHelper()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (m_rgSize)
+ {
+ DeleteInteropSafe(m_rgSize);
+ }
+
+ if (m_rgElemType)
+ {
+ DeleteInteropSafe((int*)m_rgElemType);
+ }
+}
+
+//
+// GetSetFrameHelper::GetSizeOfElement()
+//
+// Given a CorElementType, this function returns the size of this type.
+// Note that this function doesn't handle ELEMENT_TYPE_VALUETYPE. Use GetValueClassSize() instead.
+//
+// parameters: cet - the CorElementType of the argument/local we are dealing with
+//
+// return value: the size of the argument/local
+//
+// static
+SIZE_T GetSetFrameHelper::GetSizeOfElement(CorElementType cet)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(cet != ELEMENT_TYPE_VALUETYPE);
+ }
+ CONTRACTL_END;
+
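+ // Anything that isn't a primitive type (object refs, pointers, byrefs, etc.) is
+ // reported as occupying a pointer-sized slot.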
+ if (!CorIsPrimitiveType(cet))
+ {
+ return sizeof(SIZE_T);
+ }
+ else
+ {
+ switch (cet)
+ {
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+#if defined(_WIN64)
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+#endif // _WIN64
+ case ELEMENT_TYPE_R8:
+ return 8;
+
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+#if !defined(_WIN64)
+ case ELEMENT_TYPE_I:
+ case ELEMENT_TYPE_U:
+#endif // !_WIN64
+ case ELEMENT_TYPE_R4:
+ return 4;
+
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ return 2;
+
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_BOOLEAN:
+ return 1;
+
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_END:
+ _ASSERTE(!"debugger.cpp - Check this code path\n");
+ return 0;
+
+ case ELEMENT_TYPE_STRING:
+ return sizeof(SIZE_T);
+
+ default:
+ _ASSERTE(!"debugger.cpp - Check this code path\n");
+ return sizeof(SIZE_T);
+ }
+ }
+}
+
+//
+// GetSetFrameHelper::GetValueClassSize()
+//
+// Given a MetaSig pointer to the signature of a value type, this function returns its size.
+//
+// parameters: pSig - MetaSig pointer to the signature of a value type
+//
+// return value: the size of this value type
+//
+SIZE_T GetSetFrameHelper::GetValueClassSize(MetaSig* pSig)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pSig));
+ }
+ CONTRACTL_END;
+
+ // We need to determine the number of bytes for this value-type.
+ SigPointer sp = pSig->GetArgProps();
+
+ TypeHandle vcType = TypeHandle();
+ {
+ // Lookup operations run the class loader in non-load mode.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+
+ // This will return Null if type is not restored
+ // @todo : is this what we want?
+ SigTypeContext typeContext(m_pMD);
+ vcType = sp.GetTypeHandleThrowing(m_pMD->GetModule(),
+ &typeContext,
+ // == FailIfNotLoaded
+ ClassLoader::DontLoadTypes);
+ }
+ // We need to know the size of the class in bytes. This means:
+ // - we need a specific instantiation (since that affects size)
+ // - but we don't care if it's shared (since it will be the same size either way)
+ _ASSERTE(!vcType.IsNull() && vcType.IsValueType());
+
+ return (vcType.GetMethodTable()->GetAlignedNumInstanceFieldBytes());
+}
+
+//
+// GetSetFrameHelper::GetValueClassSizeOfVar()
+//
+// This method retrieves the size of the variable saved in the array m_rgSize. Also, it returns true
+// if the variable is a value type.
+//
+// parameters: varNum - the variable number (arguments come before locals)
+// varType - the type of variable home
+// pSize - [out] the size
+//
+// return value: whether this variable is a value type
+//
+bool GetSetFrameHelper::GetValueClassSizeOfVar(int varNum, ICorDebugInfo::VarLocType varType, SIZE_T* pSize)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(varType != ICorDebugInfo::VLT_FIXED_VA);
+ PRECONDITION(pSize != NULL);
+ }
+ CONTRACTL_END;
+
+ // preliminary checking
+ if (varNum < 0)
+ {
+ // Make sure this is one of the secret parameters (e.g. VASigCookie, generics context, etc.).
+ _ASSERTE(varNum > (int)ICorDebugInfo::MAX_ILNUM);
+
+ *pSize = sizeof(LPVOID);
+ return false;
+ }
+
+ // This check is only safe after we make sure that varNum is not negative.
+ if ((UINT)varNum >= m_numTotalVars)
+ {
+ _ASSERTE(!"invalid variable index encountered during setip");
+ *pSize = 0;
+ return false;
+ }
+
+ CorElementType cet = m_rgElemType[varNum];
+ *pSize = m_rgSize[varNum];
+
+ if ((cet != ELEMENT_TYPE_VALUETYPE) ||
+ (varType == ICorDebugInfo::VLT_REG) ||
+ (varType == ICorDebugInfo::VLT_REG_REG) ||
+ (varType == ICorDebugInfo::VLT_REG_STK) ||
+ (varType == ICorDebugInfo::VLT_STK_REG))
+ {
+ return false;
+ }
+ else
+ {
+ return true;
+ }
+}
+
+int GetSetFrameHelper::ShiftIndexForHiddens(int varNum)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ //
+ // Need to shift them up so they form an appropriate index into the rgVal arrays.
+ //
+ return varNum - ICorDebugInfo::UNKNOWN_ILNUM;
+}
+
+// Helper method pair to grab all, then set all, variables at a given
+// point in a routine.
+// NOTE: GetVariablesFromOffset and SetVariablesAtOffset are
+// very similar - modifying one will probably need to be reflected in the other...
+// rgVal1 and rgVal2 are preallocated by callers with estimated size.
+// We pass in the size of the allocation in uRgValSize. The safe index range is rgVal1[0..uRgValSize - 1].
+//
+HRESULT Debugger::GetVariablesFromOffset(MethodDesc *pMD,
+ UINT varNativeInfoCount,
+ ICorDebugInfo::NativeVarInfo *varNativeInfo,
+ SIZE_T offsetFrom,
+ CONTEXT *pCtx,
+ SIZE_T *rgVal1,
+ SIZE_T *rgVal2,
+ UINT uRgValSize, // number of elements of the preallocated rgVal1 and rgVal2
+ BYTE ***rgpVCs)
+{
+ // @todo - convert this to throwing w/ holders. It will be cleaner.
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(rgpVCs));
+ PRECONDITION(CheckPointer(pCtx));
+ PRECONDITION(varNativeInfoCount == 0 || CheckPointer(varNativeInfo));
+ PRECONDITION(varNativeInfoCount == 0 || CheckPointer(rgVal1));
+ PRECONDITION(varNativeInfoCount == 0 || CheckPointer(rgVal2));
+ // This may or may not be called on the helper thread.
+ }
+ CONTRACTL_END;
+
+ *rgpVCs = NULL;
+ // if there are no locals, well, we are done!
+
+ if (varNativeInfoCount == 0)
+ {
+ return S_OK;
+ }
+
+ memset( rgVal1, 0, sizeof(SIZE_T)*uRgValSize);
+ memset( rgVal2, 0, sizeof(SIZE_T)*uRgValSize);
+
+ LOG((LF_CORDB|LF_ENC, LL_INFO10000, "D::GVFO: %s::%s, infoCount:0x%x, from:0x%p\n",
+ pMD->m_pszDebugClassName,
+ pMD->m_pszDebugMethodName,
+ varNativeInfoCount,
+ offsetFrom));
+
+ GetSetFrameHelper frameHelper;
+ HRESULT hr = frameHelper.Init(pMD);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ // preallocate enough to hold all possible valueclass args & locals
+ // sure this is more than we need, but not a big deal and better
+ // than having to crawl through the frameHelper and count
+ ULONG cValueClasses = 0;
+ BYTE **rgpValueClasses = new (interopsafe, nothrow) BYTE *[varNativeInfoCount];
+ if (rgpValueClasses == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ memset(rgpValueClasses, 0, sizeof(BYTE *)*varNativeInfoCount);
+
+ hr = S_OK;
+
+ LOG((LF_CORDB|LF_ENC,
+ LL_INFO10000,
+ "D::GVFO rgVal1 0x%X, rgVal2 0x%X\n",
+ rgVal1,
+ rgVal2));
+
+ // Now go through the full array and save off each arg and local
+ for (UINT i = 0; i< varNativeInfoCount;i++)
+ {
+ // Ignore variables not live at offsetFrom
+ //
+ // #VarLife
+ //
+ // The condition below is a little strange. If a var is alive when this is true:
+ //
+ // startOffset <= offsetFrom < endOffset
+ //
+ // Then you'd expect the negated expression below to be:
+ //
+ // startOffset > offsetFrom || endOffset <= offsetFrom
+ //
+ // instead of what we're doing ("<" instead of "<="):
+ //
+ // startOffset > offsetFrom || endOffset < offsetFrom
+ //
+ // I'm not sure if the condition below is a mistake, or if it's intentionally
+ // mirroring a workaround from FindNativeInfoInILVariableArray() (Debug\DI\module.cpp)
+ // to deal with optimized code. So I'm leaving it alone for now. See
+ // code:FindNativeInfoInILVariableArray for more info on this workaround.
+ if ((varNativeInfo[i].startOffset > offsetFrom) ||
+ (varNativeInfo[i].endOffset < offsetFrom) ||
+ (varNativeInfo[i].loc.vlType == ICorDebugInfo::VLT_INVALID))
+ {
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000, "D::GVFO [%2d] invalid\n", i));
+ continue;
+ }
+
+ SIZE_T cbClass;
+ bool isVC = frameHelper.GetValueClassSizeOfVar(varNativeInfo[i].varNumber,
+ varNativeInfo[i].loc.vlType,
+ &cbClass);
+
+ if (!isVC)
+ {
+ int rgValIndex = frameHelper.ShiftIndexForHiddens(varNativeInfo[i].varNumber);
+
+ _ASSERTE(rgValIndex >= 0 && rgValIndex < (int)uRgValSize);
+
+ BOOL res = GetNativeVarVal(varNativeInfo[i].loc,
+ pCtx,
+ rgVal1 + rgValIndex,
+ rgVal2 + rgValIndex
+ WIN64_ARG(cbClass));
+
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000,
+ "D::GVFO [%2d] varnum %d, nonVC type %x, addr %8.8x: %8.8x;%8.8x\n",
+ i,
+ varNativeInfo[i].varNumber,
+ varNativeInfo[i].loc.vlType,
+ NativeVarStackAddr(varNativeInfo[i].loc, pCtx),
+ rgVal1[rgValIndex],
+ rgVal2[rgValIndex]));
+
+ if (res == TRUE)
+ {
+ continue;
+ }
+
+ _ASSERTE(res == TRUE);
+ hr = E_FAIL;
+ break;
+ }
+
+ // it's definitely a value class
+ // Make space for it - note that it uses the VC index, NOT the variable index
+ _ASSERTE(cbClass != 0);
+ rgpValueClasses[cValueClasses] = new (interopsafe, nothrow) BYTE[cbClass];
+ if (rgpValueClasses[cValueClasses] == NULL)
+ {
+ hr = E_OUTOFMEMORY;
+ break;
+ }
+ memcpy(rgpValueClasses[cValueClasses],
+ NativeVarStackAddr(varNativeInfo[i].loc, pCtx),
+ cbClass);
+
+ // Move index up.
+ cValueClasses++;
+#ifdef _DEBUG
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000,
+ "D::GVFO [%2d] varnum %d, VC len %d, addr %8.8x, sample: %8.8x%8.8x\n",
+ i,
+ varNativeInfo[i].varNumber,
+ cbClass,
+ NativeVarStackAddr(varNativeInfo[i].loc, pCtx),
+ (rgpValueClasses[cValueClasses-1])[0], (rgpValueClasses[cValueClasses-1])[1]));
+#endif
+ }
+
+ LOG((LF_CORDB|LF_ENC, LL_INFO10000, "D::GVFO: returning %8.8x\n", hr));
+ if (SUCCEEDED(hr))
+ {
+ (*rgpVCs) = rgpValueClasses;
+ return hr;
+ }
+
+ // We failed for some reason
+ if (rgpValueClasses != NULL)
+ { // free any memory we allocated for VCs here
+ while(cValueClasses > 0)
+ {
+ --cValueClasses;
+ DeleteInteropSafe(rgpValueClasses[cValueClasses]); // OK to delete NULL
+ }
+ DeleteInteropSafe(rgpValueClasses);
+ rgpValueClasses = NULL;
+ }
+ return hr;
+}
+
+// NOTE: GetVariablesFromOffset and SetVariablesAtOffset are
+// very similar - modifying one will probably need to be reflected in the other...
+HRESULT Debugger::SetVariablesAtOffset(MethodDesc *pMD,
+ UINT varNativeInfoCount,
+ ICorDebugInfo::NativeVarInfo *varNativeInfo,
+ SIZE_T offsetTo,
+ CONTEXT *pCtx,
+ SIZE_T *rgVal1,
+ SIZE_T *rgVal2,
+ BYTE **rgpVCs)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pCtx));
+ PRECONDITION(varNativeInfoCount == 0 || CheckPointer(rgpVCs));
+ PRECONDITION(varNativeInfoCount == 0 || CheckPointer(varNativeInfo));
+ PRECONDITION(varNativeInfoCount == 0 || CheckPointer(rgVal1));
+ PRECONDITION(varNativeInfoCount == 0 || CheckPointer(rgVal2));
+ // This may or may not be called on the helper thread.
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB|LF_ENC, LL_INFO10000, "D::SVAO: %s::%s, infoCount:0x%x, to:0x%p\n",
+ pMD->m_pszDebugClassName,
+ pMD->m_pszDebugMethodName,
+ varNativeInfoCount,
+ offsetTo));
+
+ if (varNativeInfoCount == 0)
+ {
+ return S_OK;
+ }
+
+ GetSetFrameHelper frameHelper;
+ HRESULT hr = frameHelper.Init(pMD);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ ULONG iVC = 0;
+ hr = S_OK;
+
+ // Note that since we obtain all the variables in the first loop, we
+ // can now splatter those variables into their new locations
+ // willy-nilly, without the fear that variable locations that have
+ // been swapped might accidentally overwrite a variable value.
+ for (UINT i = 0;i< varNativeInfoCount;i++)
+ {
+ // Ignore variables not live at offsetTo
+ //
+ // If this IF condition looks wrong to you, see
+ // code:Debugger::GetVariablesFromOffset#VarLife for more info
+ if ((varNativeInfo[i].startOffset > offsetTo) ||
+ (varNativeInfo[i].endOffset < offsetTo) ||
+ (varNativeInfo[i].loc.vlType == ICorDebugInfo::VLT_INVALID))
+ {
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000, "D::SVAO [%2d] invalid\n", i));
+ continue;
+ }
+
+ SIZE_T cbClass;
+ bool isVC = frameHelper.GetValueClassSizeOfVar(varNativeInfo[i].varNumber,
+ varNativeInfo[i].loc.vlType,
+ &cbClass);
+
+ if (!isVC)
+ {
+ int rgValIndex = frameHelper.ShiftIndexForHiddens(varNativeInfo[i].varNumber);
+
+ _ASSERTE(rgValIndex >= 0);
+
+ BOOL res = SetNativeVarVal(varNativeInfo[i].loc,
+ pCtx,
+ rgVal1[rgValIndex],
+ rgVal2[rgValIndex]
+ WIN64_ARG(cbClass));
+
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000,
+ "D::SVAO [%2d] varnum %d, nonVC type %x, addr %8.8x: %8.8x;%8.8x\n",
+ i,
+ varNativeInfo[i].varNumber,
+ varNativeInfo[i].loc.vlType,
+ NativeVarStackAddr(varNativeInfo[i].loc, pCtx),
+ rgVal1[rgValIndex],
+ rgVal2[rgValIndex]));
+
+ if (res == TRUE)
+ {
+ continue;
+ }
+ _ASSERTE(res == TRUE);
+ hr = E_FAIL;
+ break;
+ }
+
+        // It's definitely a value class.
+ _ASSERTE(cbClass != 0);
+ if (rgpVCs[iVC] == NULL)
+ {
+ // it's new in scope, so just clear it
+ memset(NativeVarStackAddr(varNativeInfo[i].loc, pCtx), 0, cbClass);
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000, "D::SVAO [%2d] varnum %d, new VC len %d, addr %8.8x\n",
+ i,
+ varNativeInfo[i].varNumber,
+ cbClass,
+ NativeVarStackAddr(varNativeInfo[i].loc, pCtx)));
+ continue;
+ }
+ // it's a pre-existing VC, so copy it
+ memmove(NativeVarStackAddr(varNativeInfo[i].loc, pCtx), rgpVCs[iVC], cbClass);
+#ifdef _DEBUG
+ LOG((LF_CORDB|LF_ENC,LL_INFO10000,
+ "D::SVAO [%2d] varnum %d, VC len %d, addr: %8.8x sample: %8.8x%8.8x\n",
+ i,
+ varNativeInfo[i].varNumber,
+ cbClass,
+ NativeVarStackAddr(varNativeInfo[i].loc, pCtx),
+ rgpVCs[iVC][0],
+ rgpVCs[iVC][1]));
+#endif
+ // Now get rid of the memory
+ DeleteInteropSafe(rgpVCs[iVC]);
+ rgpVCs[iVC] = NULL;
+ iVC++;
+ }
+
+ LOG((LF_CORDB|LF_ENC, LL_INFO10000, "D::SVAO: returning %8.8x\n", hr));
+
+ if (rgpVCs != NULL)
+ {
+ DeleteInteropSafe(rgpVCs);
+ }
+
+ return hr;
+}
+
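+// IsDuplicatePatch: simple linear scan over the patch IDs that have already been
+// mapped for the current version (the GetBPMappingDuplicates() table); used by
+// MapAndBindFunctionPatches below to avoid mapping the same patch twice.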
+BOOL IsDuplicatePatch(SIZE_T *rgEntries,
+ ULONG cEntries,
+ SIZE_T Entry )
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for( ULONG i = 0; i < cEntries;i++)
+ {
+ if (rgEntries[i] == Entry)
+ return TRUE;
+ }
+ return FALSE;
+}
+
+
+/******************************************************************************
+// HRESULT Debugger::MapAndBindFunctionPatches(): For each breakpoint
+// that we've set in any version of the existing function,
+// set a corresponding breakpoint in the new function if we haven't moved
+// the patch to the new version already.
+//
+// This must be done _AFTER_ the MethodDesc has been updated
+// with the new address (i.e., when GetFunctionAddress(pFD) returns
+// the address of the new EnC code)
+//
+// Parameters:
+// djiNew - this is the DJI created in D::JitComplete.
+//   (djiNew == NULL iff we aren't tracking debug-info.)
+// fd - the method desc that we're binding to.
+// addrOfCode - address of the native blob of code we just jitted
+//
+// <TODO>@todo Replace array with hashtable for improved efficiency</TODO>
+// <TODO>@todo Need to factor code, so that we can selectively map forward DFK(ilOffset) BPs</TODO>
+ ******************************************************************************/
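+//
+// In outline, the loop below walks every patch in the patch table and, for the
+// patches that belong to this method, either (a) binds native-offset-0 patches
+// directly when there is no debug info (djiNew == NULL), or (b) maps the patch
+// onto the new version via MapPatchToDJI. Any patch that cannot be bound is
+// queued so a BreakpointSetError event can be sent once the controller lock is
+// released.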
+HRESULT Debugger::MapAndBindFunctionPatches(DebuggerJitInfo *djiNew,
+ MethodDesc * fd,
+ CORDB_ADDRESS_TYPE *addrOfCode)
+{
+ // @@@
+ // Internal helper API. Can be called from Debugger or Controller.
+ //
+
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT;
+ PRECONDITION(!djiNew || djiNew->m_fd == fd);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ HASHFIND hf;
+ SIZE_T *pidTableEntry = NULL;
+ SIZE_T pidInCaseTableMoves;
+ Module *pModule = g_pEEInterface->MethodDescGetModule(fd);
+ mdMethodDef md = fd->GetMemberDef();
+
+ LOG((LF_CORDB,LL_INFO10000,"D::MABFP: All BPs will be mapped to "
+ "Ver:0x%04x (DJI:0x%08x)\n", djiNew?djiNew->m_methodInfo->GetCurrentEnCVersion():0, djiNew));
+
+ // We need to traverse the patch list while under the controller lock (small lock).
+    // But we can only send BreakpointSetErrors while under the debugger lock (big lock).
+ // So to avoid a lock violation, we queue any errors we find under the small lock,
+ // and then send the whole list when under the big lock.
+ PATCH_UNORDERED_ARRAY listUnbindablePatches;
+
+
+ // First lock the patch table so it doesn't move while we're
+ // examining it.
+ LOG((LF_CORDB,LL_INFO10000, "D::MABFP: About to lock patch table\n"));
+ {
+ DebuggerController::ControllerLockHolder ch;
+
+ // Manipulate tables AFTER lock's been acquired.
+ DebuggerPatchTable *pPatchTable = DebuggerController::GetPatchTable();
+ GetBPMappingDuplicates()->Clear(); //dups are tracked per-version
+
+ for (DebuggerControllerPatch *dcp = pPatchTable->GetFirstPatch(&hf);
+ dcp != NULL;
+ dcp = pPatchTable->GetNextPatch( &hf ))
+ {
+
+ LOG((LF_CORDB, LL_INFO10000, "D::MABFP: got patch 0x%p\n", dcp));
+
+ // Only copy over breakpoints that are in this method
+ // Ideally we'd have a per-method index since there can be a lot of patches
+ // when the EnCBreakpoint patches are included.
+ if (dcp->key.module != pModule || dcp->key.md != md)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Patch not in this method\n"));
+ continue;
+ }
+
+ // Do not copy over slave breakpoint patches. Instead place a new slave
+ // based off the master.
+ if (dcp->IsILSlavePatch())
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Not copying over slave breakpoint patch\n"));
+ continue;
+ }
+
+ // If the patch is already bound, then we don't want to try to rebind it.
+ // Eg. It may be bound to a different generic method instantiation.
+ if (dcp->IsBound())
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Skipping already bound patch\n"));
+ continue;
+ }
+
+ // Only apply breakpoint patches that are for this version.
+ // If the patch doesn't have a particular EnCVersion available from its data then
+ // we're (probably) not tracking JIT info.
+ if (dcp->IsBreakpointPatch() && dcp->HasEnCVersion() && djiNew && dcp->GetEnCVersion() != djiNew->m_encVersion)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Not applying breakpoint patch to new version\n"));
+ continue;
+ }
+
+ // Only apply breakpoint and stepper patches
+ //
+ // The DJI gets deleted as part of the Unbind/Rebind process in MovedCode.
+ // This is to signal that we should not skip here.
+ // <NICE> under exactly what scenarios (EnC, code pitching etc.) will this apply?... </NICE>
+            // <NICE> can't we be a little clearer about why we don't want to bind the patch in this arcane situation?</NICE>
+ if (dcp->HasDJI() && !dcp->IsBreakpointPatch() && !dcp->IsStepperPatch())
+ {
+                LOG((LF_CORDB, LL_INFO10000, "Neither stepper nor BP but we have a valid DJI (i.e. the DJI hasn't been deleted as part of the Unbind/MovedCode/Rebind mess)! - getting next patch!\n"));
+ continue;
+ }
+
+ // Now check if we're tracking JIT info or not
+ if (djiNew == NULL)
+ {
+ // This means we put a patch in a method w/ no debug info.
+ _ASSERTE(dcp->IsBreakpointPatch() ||
+ dcp->IsStepperPatch() ||
+ dcp->controller->GetDCType() == DEBUGGER_CONTROLLER_THREAD_STARTER);
+
+                // W/o debug-info, we can only patch native offsets, and only at the start of the method (native offset 0).
+ // <TODO> Why can't we patch other native offsets??
+ // Maybe b/c we don't know if we're patching
+ // in the middle of an instruction. Though that's not a
+ // strict requirement.</TODO>
+ // We can't even do a IL-offset 0 because that's after the prolog and w/o the debug-info,
+ // we don't know where the prolog ends.
+ // Failing this assert is arguably an API misusage - the debugger should have enabled
+ // jit-tracking if they wanted to put bps at offsets other than native:0.
+ if (dcp->IsNativePatch() && (dcp->offset == 0))
+ {
+ DebuggerController::g_patches->BindPatch(dcp, addrOfCode);
+ DebuggerController::ActivatePatch(dcp);
+ }
+ else
+ {
+ // IF a debugger calls EnableJitDebugging(true, ...) in the module-load callback,
+ // we should never get here.
+ *(listUnbindablePatches.AppendThrowing()) = dcp;
+ }
+
+ }
+ else
+ {
+ pidInCaseTableMoves = dcp->pid;
+
+ // If we've already mapped this one to the current version,
+ // don't map it again.
+ LOG((LF_CORDB,LL_INFO10000,"D::MABFP: Checking if 0x%x is a dup...",
+ pidInCaseTableMoves));
+
+ if ( IsDuplicatePatch(GetBPMappingDuplicates()->Table(),
+ GetBPMappingDuplicates()->Count(),
+ pidInCaseTableMoves) )
+ {
+ LOG((LF_CORDB,LL_INFO10000,"it is!\n"));
+ continue;
+ }
+ LOG((LF_CORDB,LL_INFO10000,"nope!\n"));
+
+ // Attempt mapping from patch to new version of code, and
+ // we don't care if it turns out that there isn't a mapping.
+ // <TODO>@todo-postponed: EnC: Make sure that this doesn't cause
+ // the patch-table to shift.</TODO>
+ hr = MapPatchToDJI( dcp, djiNew );
+ if (CORDBG_E_CODE_NOT_AVAILABLE == hr )
+ {
+ *(listUnbindablePatches.AppendThrowing()) = dcp;
+ hr = S_OK;
+ }
+
+ if (FAILED(hr))
+ break;
+
+ //Remember the patch id to prevent duplication later
+ pidTableEntry = GetBPMappingDuplicates()->Append();
+ if (NULL == pidTableEntry)
+ {
+ hr = E_OUTOFMEMORY;
+ break;
+ }
+
+ *pidTableEntry = pidInCaseTableMoves;
+ LOG((LF_CORDB,LL_INFO10000,"D::MABFP Adding 0x%x to list of "
+ "already mapped patches\n", pidInCaseTableMoves));
+ }
+ }
+
+ // unlock controller lock before sending events.
+ }
+ LOG((LF_CORDB,LL_INFO10000, "D::MABFP: Unlocked patch table\n"));
+
+
+ // Now send any Breakpoint bind error events.
+ if (listUnbindablePatches.Count() > 0)
+ {
+ LockAndSendBreakpointSetError(&listUnbindablePatches);
+ }
+
+ return hr;
+}
+
+/******************************************************************************
+// HRESULT Debugger::MapPatchToDJI(): Maps the given
+// patch to the corresponding location at the new address.
+// We assume that the new code has been JITTed.
+// Returns: CORDBG_E_CODE_NOT_AVAILABLE - Indicates that a mapping wasn't
+// available, and thus no patch was placed. The caller may or may
+// not care.
+ ******************************************************************************/
+HRESULT Debugger::MapPatchToDJI( DebuggerControllerPatch *dcp,DebuggerJitInfo *djiTo)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT;
+ PRECONDITION(djiTo != NULL);
+ PRECONDITION(djiTo->m_jitComplete == true);
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(DebuggerController::HasLock());
+#ifdef _DEBUG
+ static BOOL shouldBreak = -1;
+ if (shouldBreak == -1)
+ shouldBreak = UnsafeGetConfigDWORD(CLRConfig::INTERNAL_DbgBreakOnMapPatchToDJI);
+
+ if (shouldBreak > 0) {
+        _ASSERTE(!"DbgBreakOnMapPatchToDJI");
+ }
+#endif
+
+ LOG((LF_CORDB, LL_EVERYTHING, "Calling MapPatchToDJI\n"));
+
+ // We shouldn't have been asked to map an already bound patch
+ _ASSERTE( !dcp->IsBound() );
+ if ( dcp->IsBound() )
+ {
+ return S_OK;
+ }
+
+ // If the patch has no DJI then we're doing a UnbindFunctionPatches/RebindFunctionPatches. Either
+ // way, we simply want the most recent version. In the absence of EnC we should have djiCur == djiTo.
+ DebuggerJitInfo *djiCur = dcp->HasDJI() ? dcp->GetDJI() : djiTo;
+ PREFIX_ASSUME(djiCur != NULL);
+
+ // If the source and destination are the same version, then this method
+ // decays into BindFunctionPatch's BindPatch function
+ if (djiCur->m_encVersion == djiTo->m_encVersion)
+ {
+ // If the patch is a "master" then make a new "slave" patch instead of
+ // binding the old one. This is to stop us mucking with the master breakpoint patch
+ // which we may need to bind several times for generic code.
+ if (dcp->IsILMasterPatch())
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "Add, Bind, Activate new patch from master patch\n"));
+ if (dcp->controller->AddBindAndActivateILSlavePatch(dcp, djiTo))
+ {
+ LOG((LF_CORDB, LL_INFO1000, "Add, Bind Activate went fine!\n" ));
+ return S_OK;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000, "Didn't work for some reason!\n"));
+
+ // Caller can track this HR and send error.
+ return CORDBG_E_CODE_NOT_AVAILABLE;
+ }
+ }
+ else
+ {
+ // <TODO>
+ // We could actually have a native managed patch here. This patch is probably added
+ // as a result of tracing a patch. See if we can eliminate the need for this code path
+ // </TODO>
+ _ASSERTE( dcp->GetKind() == PATCH_KIND_NATIVE_MANAGED );
+
+ // We have an unbound native patch (eg. for PatchTrace), lets try to bind and activate it
+ dcp->SetDJI(djiTo);
+ LOG((LF_CORDB, LL_EVERYTHING, "trying to bind patch... could be problem\n"));
+ if (DebuggerController::BindPatch(dcp, djiTo->m_fd, NULL))
+ {
+ DebuggerController::ActivatePatch(dcp);
+ LOG((LF_CORDB, LL_INFO1000, "Application went fine!\n" ));
+ return S_OK;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000, "Didn't apply for some reason!\n"));
+
+ // Caller can track this HR and send error.
+ return CORDBG_E_CODE_NOT_AVAILABLE;
+ }
+ }
+ }
+
+ // Breakpoint patches never get mapped over
+ _ASSERTE(!dcp->IsBreakpointPatch());
+
+ return S_OK;
+}
+
+//
+// Wrapper around WaitForSingleObject for the debugger. If the CLR is hosted,
+// notify the host before we leave the runtime.
+//
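+// An illustrative call (handle name hypothetical):
+//     DWORD dw = WaitForSingleObjectHelper(hRightSideEvent, INFINITE);
+// WAIT_ABANDONED is returned if the leave-runtime notification to the host throws.
+//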
+DWORD Debugger::WaitForSingleObjectHelper(HANDLE handle, DWORD dwMilliseconds)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DWORD dw = 0;
+ EX_TRY
+ {
+
+ // make sure that we let host know that we are leaving runtime.
+ LeaveRuntimeHolder holder((size_t)(::WaitForSingleObject));
+ dw = ::WaitForSingleObject(handle,dwMilliseconds);
+ }
+ EX_CATCH
+ {
+ // Only possibility to enter here is when Thread::LeaveRuntime
+ // throws exception.
+ dw = WAIT_ABANDONED;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ return dw;
+
+}
+
+
+/* ------------------------------------------------------------------------ *
+ * EE Interface routines
+ * ------------------------------------------------------------------------ */
+
+//
+// SendSyncCompleteIPCEvent sends a Sync Complete event to the Right Side.
+//
+void Debugger::SendSyncCompleteIPCEvent()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(ThreadHoldsLock());
+
+ // Anyone sending the synccomplete must hold the TSL.
+ PRECONDITION(ThreadStore::HoldingThreadStore() || g_fProcessDetach);
+
+ // The sync complete is now only sent on a helper thread.
+ PRECONDITION(ThisIsHelperThreadWorker());
+ MODE_COOPERATIVE;
+
+ // We had better be trapping Runtime threads and not stopped yet.
+ PRECONDITION(m_stopped && m_trappingRuntimeThreads);
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // Internal helper API.
+ // This is to send Sync Complete event to RightSide.
+    // We should be holding the debugger lock.
+ //
+
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "D::SSCIPCE: sync complete.\n");
+
+    // Synchronizing while in rude shutdown should be extremely rare b/c we don't
+ // TART in rude shutdown. Shutdown must have started after we started to sync.
+ // We know we're not on the shutdown thread here.
+ // And we also know we can't block the shutdown thread (b/c it has the TSL and will
+ // get a free pass through the GC toggles that normally block threads for debugging).
+ if (g_fProcessDetach)
+ {
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "D::SSCIPCE: Skipping for shutdown.\n");
+ return;
+ }
+
+ // If we're not marked as attached yet, then do that now.
+ // This can be safely called multiple times.
+ // This can happen in the normal attach case. The Right-side sends an async-break,
+ // but we don't want to be considered attach until we've actually gotten our first synchronization.
+    // Else threads may slip forward during attach and send debug events while we're trying to attach.
+ MarkDebuggerAttachedInternal();
+
+ DebuggerIPCControlBlock * pDCB;
+ pDCB = m_pRCThread->GetDCB();
+ (void)pDCB; //prevent "unused variable" error from GCC
+
+ PREFIX_ASSUME(pDCB != NULL); // must have DCB by the time we're sending IPC events.
+#ifdef FEATURE_INTEROP_DEBUGGING
+ // The synccomplete can't be the first IPC event over. That's b/c the LS needs to know
+ // if we're interop-debugging and the RS needs to know special addresses for interop-debugging
+ // (like flares). All of this info is in the DCB.
+ if (pDCB->m_rightSideIsWin32Debugger)
+ {
+
+ // If the Right Side is the win32 debugger of this process, then we need to throw a special breakpoint exception
+ // here instead of sending the sync complete event. The Right Side treats this the same as a sync complete
+        // event, but it's also able to suspend unmanaged threads quickly.
+ // This also prevents races between sending the sync-complete and getting a native debug event
+ // (since the sync-complete becomes a native debug event, and all native debug events are serialized).
+ //
+ // Note: we reset the syncThreadIsLockFree event before sending the sync complete flare. This thread will set
+        // this event once it has released the debugger lock. This will prevent the Right Side from suspending this thread
+ // until it has released the debugger lock.
+ Debugger::NotifyRightSideOfSyncComplete();
+ }
+ else
+#endif // FEATURE_INTEROP_DEBUGGING
+ {
+ STRESS_LOG0(LF_CORDB, LL_EVERYTHING, "GetIPCEventSendBuffer called in SendSyncCompleteIPCEvent\n");
+ // Send the Sync Complete event to the Right Side
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce, DB_IPCE_SYNC_COMPLETE);
+
+ m_pRCThread->SendIPCEvent();
+ }
+}
+
+//
+// Lookup or create a DebuggerModule for the given pDomainFile.
+//
+// Arguments:
+// pDomainFile - non-null domain file.
+//
+// Returns:
+// DebuggerModule instance for the given domain file. May be lazily created.
+//
+// Notes:
+// @dbgtodo JMC - this should go away when we get rid of DebuggerModule.
+//
+
+DebuggerModule * Debugger::LookupOrCreateModule(DomainFile * pDomainFile)
+{
+ _ASSERTE(pDomainFile != NULL);
+ LOG((LF_CORDB, LL_INFO1000, "D::LOCM df=0x%x\n", pDomainFile));
+ DebuggerModule * pDModule = LookupOrCreateModule(pDomainFile->GetModule(), pDomainFile->GetAppDomain());
+ LOG((LF_CORDB, LL_INFO1000, "D::LOCM m=0x%x ad=0x%x -> dm=0x%x\n", pDomainFile->GetModule(), pDomainFile->GetAppDomain(), pDModule));
+ _ASSERTE(pDModule != NULL);
+ _ASSERTE(pDModule->GetDomainFile() == pDomainFile);
+
+ return pDModule;
+}
+
+// Overloaded wrapper for VMPTR_DomainFile --> DomainFile*
+//
+// Arguments:
+// vmDomainFile - VMPTR cookie for a domain file. This can be NullPtr().
+//
+// Returns:
+// Debugger Module instance for the given domain file. May be lazily created.
+//
+// Notes:
+// VMPTR comes from IPC events
+DebuggerModule * Debugger::LookupOrCreateModule(VMPTR_DomainFile vmDomainFile)
+{
+ DomainFile * pDomainFile = vmDomainFile.GetRawPtr();
+ if (pDomainFile == NULL)
+ {
+ return NULL;
+ }
+ return LookupOrCreateModule(pDomainFile);
+}
+
+// Lookup or create a DebuggerModule for the given (Module, AppDomain) pair.
+//
+// Arguments:
+//    pModule - required runtime module. May be domain neutral.
+// pAppDomain - required appdomain that the module is in.
+//
+// Returns:
+//    Debugger Module instance for the given domain file. May be lazily created.
+//
+DebuggerModule* Debugger::LookupOrCreateModule(Module* pModule, AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "D::LOCM m=0x%x ad=0x%x\n", pModule, pAppDomain));
+
+ // DebuggerModules are relative to a specific AppDomain so we should always be looking up a module /
+ // AppDomain pair.
+ _ASSERTE( pModule != NULL );
+ _ASSERTE( pAppDomain != NULL );
+
+ // This is called from all over. We just need to lock in order to lookup. We don't need
+ // the lock when actually using the DebuggerModule (since it won't be unloaded as long as there is a thread
+ // in that appdomain). Many of our callers already have this lock, many don't.
+ // We can take the lock anyways because it's reentrant.
+ DebuggerDataLockHolder ch(g_pDebugger); // need to traverse module list
+
+ // if this is a module belonging to the system assembly, then scan
+ // the complete list of DebuggerModules looking for the one
+    // with a matching appdomain id.
+
+ _ASSERTE( SystemDomain::SystemAssembly()->IsDomainNeutral() );
+
+ DebuggerModule* dmod = NULL;
+
+ if (m_pModules != NULL)
+ {
+ if (pModule->GetAssembly()->IsDomainNeutral())
+ {
+ // We have to make sure to lookup the module with the app domain parameter if the module lives in a shared assembly
+ dmod = m_pModules->GetModule(pModule, pAppDomain);
+ }
+ else
+ {
+ dmod = m_pModules->GetModule(pModule);
+ }
+ }
+
+ // If it doesn't exist, create it.
+ if (dmod == NULL)
+ {
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ DomainFile * pDomainFile = pModule->FindDomainFile(pAppDomain);
+ SIMPLIFYING_ASSUMPTION(pDomainFile != NULL);
+ dmod = AddDebuggerModule(pDomainFile); // throws
+ }
+ EX_CATCH_HRESULT(hr);
+ SIMPLIFYING_ASSUMPTION(dmod != NULL); // may not be true in OOM cases; but LS doesn't handle OOM.
+ }
+
+ // The module must be in the AppDomain that was requested
+ _ASSERTE( (dmod == NULL) || (dmod->GetAppDomain() == pAppDomain) );
+
+ LOG((LF_CORDB, LL_INFO1000, "D::LOCM m=0x%x ad=0x%x -> dm=0x%x\n", pModule, pAppDomain, dmod));
+ return dmod;
+}
+
+// Create a new DebuggerModule object
+//
+// Arguments:
+// pDomainFile- runtime domain file to create debugger module object around
+//
+// Returns:
+//    New instance of a DebuggerModule. Throws on failure.
+//
+DebuggerModule* Debugger::AddDebuggerModule(DomainFile * pDomainFile)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "D::ADM df=0x%x\n", pDomainFile));
+ DebuggerDataLockHolder chInfo(this);
+
+ Module * pRuntimeModule = pDomainFile->GetCurrentModule();
+ AppDomain * pAppDomain = pDomainFile->GetAppDomain();
+
+ HRESULT hr = CheckInitModuleTable();
+ IfFailThrow(hr);
+
+ DebuggerModule* pModule = new (interopsafe) DebuggerModule(pRuntimeModule, pDomainFile, pAppDomain);
+ _ASSERTE(pModule != NULL); // throws on oom
+
+ TRACE_ALLOC(pModule);
+
+ m_pModules->AddModule(pModule); // throws
+ // @dbgtodo inspection/exceptions - this may leak module in OOM case. LS is not OOM resilient; and we
+ // expect to get rid of DebuggerModule anyways.
+
+ LOG((LF_CORDB, LL_INFO1000, "D::ADM df=0x%x -> dm=0x%x\n", pDomainFile, pModule));
+ return pModule;
+}
+
+//
+// TrapAllRuntimeThreads causes every Runtime thread that is executing
+// in the EE to trap and send the at safe point event to the RC thread as
+// soon as possible. It also sets the EE up so that Runtime threads that
+// are outside of the EE will trap when they try to re-enter.
+//
+// @TODO::
+// Neither pDbgLockHolder nor pAppDomain are used.
+void Debugger::TrapAllRuntimeThreads()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+
+ // We acquired the lock b/c we're in a scope between LFES & UFES.
+ PRECONDITION(ThreadHoldsLock());
+
+ // This should never be called on a Temporary Helper thread.
+ PRECONDITION(IsDbgHelperSpecialThread() ||
+ (g_pEEInterface->GetThread() == NULL) ||
+ !g_pEEInterface->IsPreemptiveGCDisabled());
+ }
+ CONTRACTL_END;
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif
+ // Only sync if RS requested it.
+ if (!m_RSRequestedSync)
+ {
+ return;
+ }
+ m_RSRequestedSync = FALSE;
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif
+
+ // If we're doing shutdown, then don't bother trying to communicate w/ the RS.
+ // If we're not the thread doing shutdown, then we may be asynchronously killed by the OS.
+ // If we are the thread in shutdown, don't TART b/c that may block and do complicated stuff.
+ if (g_fProcessDetach)
+ {
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "D::TART: Skipping for shutdown.\n");
+ return;
+ }
+
+
+ // Only try to start trapping if we're not already trapping.
+ if (m_trappingRuntimeThreads == FALSE)
+ {
+ bool fSuspended;
+
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "D::TART: Trapping all Runtime threads.\n");
+
+ // There's no way that we should be stopped and still trying to call this function.
+ _ASSERTE(!m_stopped);
+
+ // Mark that we're trapping now.
+ m_trappingRuntimeThreads = TRUE;
+
+ // Take the thread store lock.
+ STRESS_LOG0(LF_CORDB,LL_INFO1000, "About to lock thread Store\n");
+ ThreadSuspend::LockThreadStore(ThreadSuspend::SUSPEND_FOR_DEBUGGER);
+ STRESS_LOG0(LF_CORDB,LL_INFO1000, "Locked thread store\n");
+
+ // We start the suspension here, and let the helper thread finish it.
+ // If there's no helper thread, then we need to do helper duty.
+ {
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ fSuspended = g_pEEInterface->StartSuspendForDebug(NULL, TRUE);
+ }
+
+ // We tell the RC Thread to check for other threads now and then and help them get synchronized. (This
+ // is similar to what is done when suspending threads for GC with the HandledJITCase() function.)
+
+ // This does not block.
+ // Pinging this will waken the helper thread (or temp H. thread) and tell it to sweep & send
+ // the sync complete.
+ m_pRCThread->WatchForStragglers();
+
+ // It's possible we may not have a real helper thread.
+ // - on startup in dllmain, helper is blocked on DllMain loader lock.
+ // - on shutdown, helper has been removed on us.
+ // In those cases, we need somebody to send the sync-complete, and handle
+ // managed events, and wait for the continue. So we pretend to be the helper thread.
+ STRESS_LOG0(LF_CORDB, LL_EVERYTHING, "D::SSCIPCE: Calling IsRCThreadReady()\n");
+
+ // We must check the helper thread status while under the lock.
+ _ASSERTE(ThreadHoldsLock());
+ // If we failed to suspend, then that means we must have multiple managed threads.
+ // That means that our helper is not blocked on starting up, thus we can wait infinite on it.
+ // Thus we don't need to do helper duty if the suspend fails.
+ bool fShouldDoHelperDuty = !m_pRCThread->IsRCThreadReady() && fSuspended;
+ if (fShouldDoHelperDuty && !g_fProcessDetach)
+ {
+ // In V1.0, we had the assumption that if the helper thread isn't ready yet, then we're in
+ // a state that SuspendForDebug will succeed on the first try, and thus we'll
+ // never call Sweep when doing helper thread duty.
+ _ASSERTE(fSuspended);
+
+ // This call will do a ton of work, it will toggle the lock,
+ // and it will block until we receive a continue!
+ DoHelperThreadDuty();
+
+ // We will have released the TSL after the call to continue.
+ }
+ else
+ {
+ // We have a live and active helper thread which will handle events
+ // from the RS now that we're stopped.
+ // We need to release the TSL which we acquired above. (The helper will
+ // likely take this lock while doing stuff).
+ STRESS_LOG0(LF_CORDB,LL_INFO1000, "About to unlock thread store!\n");
+ ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_FOR_DEBUGGER);
+ STRESS_LOG0(LF_CORDB,LL_INFO1000, "TART: Unlocked thread store!\n");
+ }
+ _ASSERTE(ThreadHoldsLock()); // still hold the lock. (though it may have been toggled)
+ }
+}
+
+
+//
+// ReleaseAllRuntimeThreads releases all Runtime threads that may be
+// stopped after trapping and sending the at safe point event.
+//
+void Debugger::ReleaseAllRuntimeThreads(AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ // We acquired the lock b/c we're in a scope between LFES & UFES.
+ PRECONDITION(ThreadHoldsLock());
+
+ // Currently, this is only done on a helper thread.
+ PRECONDITION(ThisIsHelperThreadWorker());
+
+ // Make sure that we were stopped...
+ PRECONDITION(m_trappingRuntimeThreads && m_stopped);
+ }
+ CONTRACTL_END;
+
+ //<TODO>@todo APPD if we want true isolation, remove this & finish the work</TODO>
+ pAppDomain = NULL;
+
+    STRESS_LOG1(LF_CORDB, LL_INFO10000, "D::RART: Releasing all Runtime threads "
+ "for AppD 0x%x.\n", pAppDomain);
+
+ // Mark that we're on our way now...
+ m_trappingRuntimeThreads = FALSE;
+ m_stopped = FALSE;
+
+ // Go ahead and resume the Runtime threads.
+ g_pEEInterface->ResumeFromDebug(pAppDomain);
+}
+
+// Given a method, gets its EnC version number. 1 if the method is not EnCed.
+// Note that MethodDescs are reused between versions so this will give us
+// the most recent EnC number.
+int Debugger::GetMethodEncNumber(MethodDesc * pMethod)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DebuggerJitInfo * dji = GetLatestJitInfoFromMethodDesc(pMethod);
+ if (dji == NULL)
+ {
+ // If there's no DJI, couldn't have been EnCed.
+ return 1;
+ }
+ return (int) dji->m_encVersion;
+}
+
+
+bool Debugger::IsJMCMethod(Module* pModule, mdMethodDef tkMethod)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CORDebuggerAttached());
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ Crst crstDbg(CrstIsJMCMethod, CRST_UNSAFE_ANYMODE);
+ PRECONDITION(crstDbg.IsSafeToTake());
+#endif
+
+ DebuggerMethodInfo *pInfo = GetOrCreateMethodInfo(pModule, tkMethod);
+
+ if (pInfo == NULL)
+ return false;
+
+ return pInfo->IsJMCFunction();
+}
+
+/******************************************************************************
+ * Called by Runtime when on a 1st chance Native Exception.
+ * This is likely when we hit a breakpoint / single-step.
+ * This is called for all native exceptions (except COM+) on managed threads,
+ * regardless of whether the debugger is attached.
+ ******************************************************************************/
+bool Debugger::FirstChanceNativeException(EXCEPTION_RECORD *exception,
+ CONTEXT *context,
+ DWORD code,
+ Thread *thread)
+{
+
+ // @@@
+ // Implement DebugInterface
+ // Can be called from EE exception code. Or from our M2UHandoffHijackFilter
+ // must be on managed thread.
+
+ CONTRACTL
+ {
+ SO_TOLERANT;
+ NOTHROW;
+
+ // No clear GC_triggers semantics here. See DispatchNativeException.
+ WRAPPER(GC_TRIGGERS);
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(exception));
+ PRECONDITION(CheckPointer(context));
+ PRECONDITION(CheckPointer(thread));
+ }
+ CONTRACTL_END;
+
+
+ // Ignore any notification exceptions sent from code:Debugger.SendRawEvent.
+ // This is not a common case, but could happen in some cases described
+ // in SendRawEvent. Either way, Left-Side and VM should just ignore these.
+ if (IsEventDebuggerNotification(exception, PTR_TO_CORDB_ADDRESS(g_pMSCorEE)))
+ {
+ return true;
+ }
+
+ bool retVal;
+
+ // Don't stop for native debugging anywhere inside our inproc-Filters.
+ CantStopHolder hHolder;
+
+ if (!CORDBUnrecoverableError(this))
+ {
+ retVal = DebuggerController::DispatchNativeException(exception, context,
+ code, thread);
+ }
+ else
+ {
+ retVal = false;
+ }
+
+ return retVal;
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+PRD_TYPE Debugger::GetPatchedOpcode(CORDB_ADDRESS_TYPE *ip)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!CORDBUnrecoverableError(this))
+ {
+ return DebuggerController::GetPatchedOpcode(ip);
+ }
+ else
+ {
+ PRD_TYPE mt;
+ InitializePRD(&mt);
+ return mt;
+ }
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+BOOL Debugger::CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address, /*OUT*/ PRD_TYPE *pOpcode)
+{
+ WRAPPER_NO_CONTRACT;
+ CONSISTENCY_CHECK(CheckPointer(address));
+ CONSISTENCY_CHECK(CheckPointer(pOpcode));
+
+ if (CORDebuggerAttached() && !CORDBUnrecoverableError(this))
+ {
+ return DebuggerController::CheckGetPatchedOpcode(address, pOpcode);
+ }
+ else
+ {
+ InitializePRD(pOpcode);
+ return FALSE;
+ }
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+void Debugger::TraceCall(const BYTE *code)
+{
+ CONTRACTL
+ {
+ // We're being called right before we call managed code. Can't trigger
+ // because there may be unprotected args on the stack.
+ MODE_COOPERATIVE;
+ GC_NOTRIGGER;
+
+ NOTHROW;
+ }
+ CONTRACTL_END;
+
+
+ Thread * pCurThread = g_pEEInterface->GetThread();
+ // Ensure we never even think about running managed code on the helper thread.
+ _ASSERTE(!ThisIsHelperThreadWorker() || !"You're running managed code on the helper thread");
+
+ // One threat is that our helper thread may be forced to execute a managed DLL main.
+ // In that case, it's before the helper thread proc is even executed, so our conventional
+ // IsHelperThread() checks are inadequate.
+ _ASSERTE((GetCurrentThreadId() != g_pRCThread->m_DbgHelperThreadOSTid) || !"You're running managed code on the helper thread");
+
+ _ASSERTE((g_pEEInterface->GetThreadFilterContext(pCurThread) == NULL) || !"Shouldn't run managed code w/ Filter-Context set");
+
+ if (!CORDBUnrecoverableError(this))
+ {
+ // There are situations where our callers can't tolerate us throwing.
+ EX_TRY
+ {
+ // Since we have a try catch and the debugger code can deal properly with
+            // faults occurring inside DebuggerController::DispatchTraceCall, we can safely
+ // establish a FAULT_NOT_FATAL region. This is required since some callers can't
+ // tolerate faults.
+ FAULT_NOT_FATAL();
+
+ DebuggerController::DispatchTraceCall(pCurThread, code);
+ }
+ EX_CATCH
+ {
+ // We're being called for our benefit, not our callers. So if we fail,
+ // they don't care.
+ // Failure for us means that some steppers may miss their notification
+ // for entering managed code.
+ LOG((LF_CORDB, LL_INFO10000, "Debugger::TraceCall - inside catch, %p\n", code));
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+}
+
+/******************************************************************************
+ * For Just-My-Code (aka Just-User-Code).
+ * Invoked from a probe in managed code when we enter a user method and
+ * the flag (set by GetJMCFlagAddr) for that method is != 0.
+ * pIP - the ip within the method, right after the prolog.
+ * sp - stack pointer (frame pointer on x86) for the managed method we're entering.
+ * bsp - backing store pointer for the managed method we're entering
+ ******************************************************************************/
+void Debugger::OnMethodEnter(void * pIP)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000000, "D::OnMethodEnter(ip=%p)\n", pIP));
+
+ if (!CORDebuggerAttached())
+ {
+        LOG((LF_CORDB, LL_INFO1000000, "D::OnMethodEnter returning since debugger not attached.\n"));
+ return;
+ }
+ FramePointer fp = LEAF_MOST_FRAME;
+ DebuggerController::DispatchMethodEnter(pIP, fp);
+}
+/******************************************************************************
+ * GetJMCFlagAddr
+ * Provide an address of the flag that the JMC probes use to decide whether
+ * or not to call TriggerMethodEnter.
+ * Called for each method that we jit.
+ * pModule - module for the JMC probe
+ * returns an address of a flag that the probe can use.
+ ******************************************************************************/
+DWORD* Debugger::GetJMCFlagAddr(Module * pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ PRECONDITION(CheckPointer(pModule));
+ }
+ CONTRACTL_END;
+
+ // This callback will be invoked whenever we jit debuggable code.
+ // A debugger may not be attached yet, but we still need someplace
+ // to store this dword.
+    // Use the EE's module, because it's always around, whether or not
+    // a debugger is attached.
+ return &(pModule->m_dwDebuggerJMCProbeCount);
+}
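+
+// Conceptually (illustrative only), the probe jitted into each JMC method reads the
+// DWORD whose address was returned here and does something like:
+//     if (*pModuleJMCFlag != 0)  { /* call the MethodEnter helper -> Debugger::OnMethodEnter above */ }
+// so a zero flag keeps non-user-code modules from paying for the callback.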
+
+/******************************************************************************
+ * Updates the JMC flag on all the EE modules.
+ * We can do this as often as we'd like - though it's a perf hit.
+ ******************************************************************************/
+void Debugger::UpdateAllModuleJMCFlag(bool fStatus)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000000, "D::UpdateModuleJMCFlag to %d\n", fStatus));
+
+ _ASSERTE(HasDebuggerDataLock());
+
+ // Loop through each module.
+ // The module table is lazily allocated. As soon as we set JMC status on any module, that will cause an
+ // allocation of the module table. So if the table isn't allocated no module has JMC set,
+ // and so there is nothing to update.
+ if (m_pModules != NULL)
+ {
+ HASHFIND f;
+ for (DebuggerModule * m = m_pModules->GetFirstModule(&f);
+ m != NULL;
+ m = m_pModules->GetNextModule(&f))
+ {
+ // the primary module may get called multiple times, but that's ok.
+ UpdateModuleJMCFlag(m->GetRuntimeModule(), fStatus);
+ } // end for all modules.
+ }
+}
+
+/******************************************************************************
+ * Updates the JMC flag on the given Primary module
+ * We can do this as often as we'd like - though it's a perf hit.
+ * If we've only changed methods in a single module, then we can just call this.
+ * If we do a more global thing (Such as enable MethodEnter), then that could
+ * affect all modules, so we use the UpdateAllModuleJMCFlag helper.
+ ******************************************************************************/
+void Debugger::UpdateModuleJMCFlag(Module * pRuntimeModule, bool fStatus)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(HasDebuggerDataLock());
+
+
+ DWORD * pFlag = &(pRuntimeModule->m_dwDebuggerJMCProbeCount);
+ _ASSERTE(pFlag != NULL);
+
+ if (pRuntimeModule->HasAnyJMCFunctions())
+ {
+ // If this is a user-code module, then update the JMC flag
+ // the probes look at so that we get MethodEnter callbacks.
+ *pFlag = fStatus;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "D::UpdateModuleJMCFlag, module %p is user code\n", pRuntimeModule));
+ } else {
+ LOG((LF_CORDB, LL_EVERYTHING, "D::UpdateModuleJMCFlag, module %p is not-user code\n", pRuntimeModule));
+
+ // if non-user code, flag should be 0 so that we don't waste
+ // cycles in the callbacks.
+ _ASSERTE(*pFlag == 0);
+ }
+}
+
+// This sets the JMC status for the entire module.
+// fStatus - default status for whole module
+void Debugger::SetModuleDefaultJMCStatus(Module * pRuntimeModule, bool fStatus)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO100000, "DM::SetJMCStatus, status=%d, this=%p\n", fStatus, this));
+
+ // Ensure that all active DMIs have our status.
+ // All new DMIs can lookup their status from us.
+ // This should also update the module count of active JMC DMI's.
+ DebuggerMethodInfoTable * pTable = g_pDebugger->GetMethodInfoTable();
+
+ if (pTable != NULL)
+ {
+ Debugger::DebuggerDataLockHolder debuggerDataLockHolder(g_pDebugger);
+ HASHFIND info;
+
+ for (DebuggerMethodInfo *dmi = pTable->GetFirstMethodInfo(&info);
+ dmi != NULL;
+ dmi = pTable->GetNextMethodInfo(&info))
+ {
+ if (dmi->GetRuntimeModule() == pRuntimeModule)
+ {
+ // This DMI is in this module, so update its status
+ dmi->SetJMCStatus(fStatus);
+ }
+ }
+ }
+
+ pRuntimeModule->SetJMCStatus(fStatus);
+
+#ifdef _DEBUG
+ // If we're disabling JMC in this module, then we shouldn't
+ // have any active JMC functions.
+ if (!fStatus)
+ {
+ _ASSERTE(!pRuntimeModule->HasAnyJMCFunctions());
+ }
+#endif
+}
+
+/******************************************************************************
+ * Called by GC to determine if it's safe to do a GC.
+ ******************************************************************************/
+bool Debugger::ThreadsAtUnsafePlaces(void)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If we're in shutdown mode, then all other threads are parked.
+ // Even if they claim to be at unsafe regions, they're still safe to do a GC. They won't touch
+ // their stacks.
+ if (m_fShutdownMode)
+ {
+ if (m_threadsAtUnsafePlaces > 0)
+ {
+ STRESS_LOG1(LF_CORDB, LL_INFO10000, "D::TAUP: Claiming safety in shutdown mode.%d\n", m_threadsAtUnsafePlaces);
+ }
+ return false;
+ }
+
+
+ return (m_threadsAtUnsafePlaces != 0);
+}
+
+//
+// SendBreakpoint is called by Runtime threads to send that they've
+// hit a breakpoint to the Right Side.
+//
+void Debugger::SendBreakpoint(Thread *thread, CONTEXT *context,
+ DebuggerBreakpoint *breakpoint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+#ifdef _DEBUG
+ static BOOL shouldBreak = -1;
+ if (shouldBreak == -1)
+ shouldBreak = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgBreakOnSendBreakpoint);
+
+ if (shouldBreak > 0) {
+ _ASSERTE(!"DbgBreakOnSendBreakpoint");
+ }
+#endif
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SB: breakpoint BP:0x%x\n", breakpoint));
+
+ _ASSERTE((g_pEEInterface->GetThread() &&
+ !g_pEEInterface->GetThread()->m_fPreemptiveGCDisabled) ||
+ g_fInControlC);
+
+ _ASSERTE(ThreadHoldsLock());
+
+ // Send a breakpoint event to the Right Side
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_BREAKPOINT,
+ thread,
+ thread->GetDomain());
+ ipce->BreakpointData.breakpointToken.Set(breakpoint);
+ _ASSERTE( breakpoint->m_pAppDomain == ipce->vmAppDomain.GetRawPtr());
+
+ m_pRCThread->SendIPCEvent();
+}
+
+
+//---------------------------------------------------------------------------------------
+// Send a user breakpoint event for this thread and synchronize the process.
+//
+// Arguments:
+// pThread - non-null thread to send user breakpoint event for.
+//
+// Notes:
+// Can't assume that a debugger is attached (since it may detach before we get the lock).
+void Debugger::SendUserBreakpointAndSynchronize(Thread * pThread)
+{
+ AtSafePlaceHolder unsafePlaceHolder(pThread);
+
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ // Actually send the event
+ if (CORDebuggerAttached())
+ {
+ SendRawUserBreakpoint(pThread);
+ TrapAllRuntimeThreads();
+ }
+
+ SENDIPCEVENT_END;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// SendRawUserBreakpoint is called by Runtime threads to send that
+// they've hit a user breakpoint to the Right Side. This is the event
+// send only part, since it can be called from a few different places.
+//
+// Arguments:
+// pThread - [in] managed thread where user break point takes place.
+//    must be the current thread.
+//
+void Debugger::SendRawUserBreakpoint(Thread * pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+
+ PRECONDITION(pThread == GetThread());
+
+ PRECONDITION(ThreadHoldsLock());
+
+ // Debugger must have been attached to get us to this point.
+ // We hold the Debugger-lock, so debugger could not have detached from
+ // underneath us either.
+ PRECONDITION(CORDebuggerAttached());
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SRUB: user breakpoint\n"));
+
+
+
+ // Send a breakpoint event to the Right Side
+ DebuggerIPCEvent* pEvent = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(pEvent,
+ DB_IPCE_USER_BREAKPOINT,
+ pThread,
+ pThread->GetDomain());
+
+ m_pRCThread->SendIPCEvent();
+}
+
+//
+// SendInterceptExceptionComplete is called by Runtime threads to send that
+// they've completed intercepting an exception to the Right Side. This is the event
+// send only part, since it can be called from a few different places.
+//
+void Debugger::SendInterceptExceptionComplete(Thread *thread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SIEC: breakpoint\n"));
+
+ _ASSERTE(!g_pEEInterface->IsPreemptiveGCDisabled());
+ _ASSERTE(ThreadHoldsLock());
+
+ // Send a breakpoint event to the Right Side
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_INTERCEPT_EXCEPTION_COMPLETE,
+ thread,
+ thread->GetDomain());
+
+ m_pRCThread->SendIPCEvent();
+}
+
+
+
+//
+// SendStep is called by Runtime threads to send that they've
+// completed a step to the Right Side.
+//
+void Debugger::SendStep(Thread *thread, CONTEXT *context,
+ DebuggerStepper *stepper,
+ CorDebugStepReason reason)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SS: step:token:0x%p reason:0x%x\n",
+ stepper, reason));
+
+ _ASSERTE((g_pEEInterface->GetThread() &&
+ !g_pEEInterface->GetThread()->m_fPreemptiveGCDisabled) ||
+ g_fInControlC);
+
+ _ASSERTE(ThreadHoldsLock());
+
+ // Send a step event to the Right Side
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_STEP_COMPLETE,
+ thread,
+ thread->GetDomain());
+ ipce->StepData.stepperToken.Set(stepper);
+ ipce->StepData.reason = reason;
+ m_pRCThread->SendIPCEvent();
+}
+
+//-------------------------------------------------------------------------------------------------
+// Send an EnC remap opportunity and block until it is continued.
+//
+// dji - current method information
+// currentIP - IL offset within that method
+// resumeIP - address of a SIZE_T that the RS will write to cross-process if they take the
+// remap opportunity. *resumeIP is untouched if the RS does not remap.
+//-------------------------------------------------------------------------------------------------
+void Debugger::LockAndSendEnCRemapEvent(DebuggerJitInfo * dji, SIZE_T currentIP, SIZE_T *resumeIP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS; // From SendIPCEvent
+ PRECONDITION(dji != NULL);
+ }
+ CONTRACTL_END;
+
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASEnCRE:\n"));
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ MethodDesc * pFD = dji->m_fd;
+
+ // Note that the debugger lock is reentrant, so we may or may not hold it already.
+ Thread *thread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, thread);
+
+ // Send an EnC remap event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_ENC_REMAP,
+ thread,
+ thread->GetDomain());
+
+ ipce->EnCRemap.currentVersionNumber = dji->m_encVersion;
+    ipce->EnCRemap.resumeVersionNumber = dji->m_methodInfo->GetCurrentEnCVersion();
+ ipce->EnCRemap.currentILOffset = currentIP;
+ ipce->EnCRemap.resumeILOffset = resumeIP;
+ ipce->EnCRemap.funcMetadataToken = pFD->GetMemberDef();
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASEnCRE: token 0x%x, from version %d to %d\n",
+ ipce->EnCRemap.funcMetadataToken, ipce->EnCRemap.currentVersionNumber, ipce->EnCRemap.resumeVersionNumber));
+
+ Module *pRuntimeModule = pFD->GetModule();
+
+ DebuggerModule * pDModule = LookupOrCreateModule(pRuntimeModule, thread->GetDomain());
+ ipce->EnCRemap.vmDomainFile.SetRawPtr((pDModule ? pDModule->GetDomainFile() : NULL));
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASEnCRE: %s::%s "
+ "dmod:0x%x, methodDef:0x%x \n",
+ pFD->m_pszDebugClassName, pFD->m_pszDebugMethodName,
+ pDModule,
+ ipce->EnCRemap.funcMetadataToken));
+
+ // IPC event is now initialized, so we can send it over.
+ SendSimpleIPCEventAndBlock();
+
+ // This will block on the continue
+ SENDIPCEVENT_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASEnCRE: done\n"));
+
+}
+
+// Send the RemapComplete event and block until the debugger Continues
+// pFD - specifies the method in which we've remapped into
+void Debugger::LockAndSendEnCRemapCompleteEvent(MethodDesc *pFD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+    LOG((LF_CORDB, LL_INFO10000, "D::LASEnCRC:\n"));
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ Thread *thread = g_pEEInterface->GetThread();
+ // Note that the debugger lock is reentrant, so we may or may not hold it already.
+ SENDIPCEVENT_BEGIN(this, thread);
+
+ EX_TRY
+ {
+ // Ensure the DJI for the latest version of this method has been pre-created.
+ // It's not clear whether this is necessary or not, but it shouldn't hurt since
+ // we're going to need to create it anyway since we'll be debugging inside it.
+ DebuggerJitInfo *dji = g_pDebugger->GetLatestJitInfoFromMethodDesc(pFD);
+ (void)dji; //prevent "unused variable" error from GCC
+ _ASSERTE( dji != NULL );
+ }
+ EX_CATCH
+ {
+        // GetLatestJitInfo could throw on OOM, but the debugger isn't resilient to OOM.
+ // I'm not aware of any other legitimate reason why it may throw, so we'll ASSERT
+ // if it fails.
+ _ASSERTE(!"Unexpected exception from Debugger::GetLatestJitInfoFromMethodDesc on EnC remap complete");
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ // Send an EnC remap complete event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_ENC_REMAP_COMPLETE,
+ thread,
+ thread->GetDomain());
+
+
+ ipce->EnCRemapComplete.funcMetadataToken = pFD->GetMemberDef();
+
+ Module *pRuntimeModule = pFD->GetModule();
+
+ DebuggerModule * pDModule = LookupOrCreateModule(pRuntimeModule, thread->GetDomain());
+ ipce->EnCRemapComplete.vmDomainFile.SetRawPtr((pDModule ? pDModule->GetDomainFile() : NULL));
+
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASEnCRC: %s::%s "
+ "dmod:0x%x, methodDef:0x%x \n",
+ pFD->m_pszDebugClassName, pFD->m_pszDebugMethodName,
+ pDModule,
+ ipce->EnCRemap.funcMetadataToken));
+
+ // IPC event is now initialized, so we can send it over.
+ SendSimpleIPCEventAndBlock();
+
+ // This will block on the continue
+ SENDIPCEVENT_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASEnCRC: done\n"));
+
+}
+//
+// This function sends a notification to the RS about a specific update that has occurred as part of
+// applying an Edit and Continue. We send notification only for function add/update and field add.
+// At this point, the EE is already stopped for handling an EnC ApplyChanges operation, so no need
+// to take locks etc.
+//
+void Debugger::SendEnCUpdateEvent(DebuggerIPCEventType eventType,
+ Module * pModule,
+ mdToken memberToken,
+ mdTypeDef classToken,
+ SIZE_T enCVersion)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASEnCUFE:\n"));
+
+ _ASSERTE(eventType == DB_IPCE_ENC_UPDATE_FUNCTION ||
+ eventType == DB_IPCE_ENC_ADD_FUNCTION ||
+ eventType== DB_IPCE_ENC_ADD_FIELD);
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ // Send an EnC UpdateFunction event to the Right Side.
+ DebuggerIPCEvent* event = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(event,
+ eventType,
+ NULL,
+ NULL);
+
+ event->EnCUpdate.newVersionNumber = enCVersion;
+ event->EnCUpdate.memberMetadataToken = memberToken;
+ // we have to pass the class token across to the RS because we cannot look it up over
+ // there based on the added field/method because the metadata on the RS will not yet
+ // have the changes applied, so the token will not exist in its metadata and we have
+ // no way to find it.
+ event->EnCUpdate.classMetadataToken = classToken;
+
+ _ASSERTE(pModule);
+ // we don't support shared assemblies, so must have an appdomain
+ _ASSERTE(pModule->GetDomain()->IsAppDomain());
+
+ DebuggerModule * pDModule = LookupOrCreateModule(pModule, pModule->GetDomain()->AsAppDomain());
+ event->EnCUpdate.vmDomainFile.SetRawPtr((pDModule ? pDModule->GetDomainFile() : NULL));
+
+ m_pRCThread->SendIPCEvent();
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASEnCUE: done\n"));
+
+}
+
+
+//
+// Send a BreakpointSetError event to the Right Side if the given patch is for a breakpoint. Note: we don't care if this
+// fails, there is nothing we can do about it anyway, and the breakpoint just won't hit.
+//
+void Debugger::LockAndSendBreakpointSetError(PATCH_UNORDERED_ARRAY * listUnbindablePatches)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(listUnbindablePatches != NULL);
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+
+ ULONG count = listUnbindablePatches->Count();
+ _ASSERTE(count > 0); // must send at least 1 event.
+
+
+ Thread *thread = g_pEEInterface->GetThread();
+ // Note that the debugger lock is reentrant, so we may or may not hold it already.
+ SENDIPCEVENT_BEGIN(this, thread);
+
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+
+ for(ULONG i = 0; i < count; i++)
+ {
+ DebuggerControllerPatch *patch = listUnbindablePatches->Table()[i];
+ _ASSERTE(patch != NULL);
+
+ // Only do this for breakpoint controllers
+ DebuggerController *controller = patch->controller;
+
+ if (controller->GetDCType() != DEBUGGER_CONTROLLER_BREAKPOINT)
+ {
+ continue;
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LASBSE:\n"));
+
+ // Send a breakpoint set error event to the Right Side.
+ InitIPCEvent(ipce, DB_IPCE_BREAKPOINT_SET_ERROR, thread, thread->GetDomain());
+
+ ipce->BreakpointSetErrorData.breakpointToken.Set(static_cast<DebuggerBreakpoint*> (controller));
+
+ // IPC event is now initialized, so we can send it over.
+ m_pRCThread->SendIPCEvent();
+ }
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+
+ // This will block on the continue
+ SENDIPCEVENT_END;
+
+}
+
+//
+// Called from the controller to lock the debugger for event
+// sending. This is called before controller events are sent, like
+// breakpoint, step complete, and thread started.
+//
+// Note that it's possible that the debugger detached (and destroyed our IPC
+// events) while we're waiting for our turn.
+// So callers should check for that case.
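+//
+// A typical (illustrative) sequence on a runtime thread looks like:
+//     LockForEventSending(&dbgLockHolder);    // bump the can't-stop count, take the lock
+//     /* InitIPCEvent(...); m_pRCThread->SendIPCEvent(); ... */
+//     TrapAllRuntimeThreads();                // start synchronizing the process
+//     UnlockFromEventSending(&dbgLockHolder);
+// The SENDIPCEVENT_BEGIN/END macros used elsewhere in this file likely wrap a
+// pattern along these lines.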
+void Debugger::LockForEventSending(DebuggerLockHolder *dbgLockHolder)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ // @todo - Force our parents to bump up the stop-count. That way they can
+ // guarantee it's balanced.
+ IncCantStopCount();
+ _ASSERTE(IsInCantStopRegion());
+
+ // What we need is for caller to get the debugger lock
+ if (dbgLockHolder != NULL)
+ {
+ dbgLockHolder->Acquire();
+ }
+
+#ifdef _DEBUG
+ // Track our TID. We're not re-entrant.
+ //_ASSERTE(m_tidLockedForEventSending == 0);
+ m_tidLockedForEventSending = GetCurrentThreadId();
+#endif
+
+}
+
+//
+// Called from the controller to unlock the debugger from event
+// sending. This is called after controller events are sent, like
+// breakpoint, step complete, and thread started.
+//
+void Debugger::UnlockFromEventSending(DebuggerLockHolder *dbgLockHolder)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ //_ASSERTE(m_tidLockedForEventSending == GetCurrentThreadId());
+ m_tidLockedForEventSending = 0;
+#endif
+ if (dbgLockHolder != NULL)
+ {
+ dbgLockHolder->Release();
+ }
+ // @todo - Force our parents to bump up the stop-count. That way they can
+ // guarantee it's balanced.
+ _ASSERTE(IsInCantStopRegion());
+ DecCantStopCount();
+}
+
+
+//
+// Called from the controller after all events have been sent for a
+// thread to sync the process.
+//
+void Debugger::SyncAllThreads(DebuggerLockHolder *dbgLockHolder)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "D::SAT: sync all threads.\n");
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ (void)pThread; //prevent "unused variable" error from GCC
+ _ASSERTE((pThread &&
+ !pThread->m_fPreemptiveGCDisabled) ||
+ g_fInControlC);
+
+ _ASSERTE(ThreadHoldsLock());
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+}
+
+//---------------------------------------------------------------------------------------
+// Launch a debugger and then trigger a breakpoint (either managed or native)
+//
+// Arguments:
+// useManagedBPForManagedAttach - TRUE if we should stop with a managed breakpoint
+// when managed attached, FALSE if we should always
+// stop with a native breakpoint
+// pThread - the managed thread that attempts to launch the registered debugger
+// pExceptionInfo - the unhandled exception info
+// explicitUserRequest - TRUE if this attach is caused by a call to the Debugger.Launch() API.
+//
+// Returns:
+// S_OK on success. Else failure.
+//
+// Notes:
+// This function doesn't try to stop the launched native debugger by calling DebugBreak().
+// It sends a breakpoint event only for managed debuggers.
+//
+HRESULT Debugger::LaunchDebuggerForUser(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo,
+ BOOL useManagedBPForManagedAttach, BOOL explicitUserRequest)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LDFU: Attaching Debugger.\n"));
+
+ //
+ // Initiate a jit attach
+ //
+ JitAttach(pThread, pExceptionInfo, useManagedBPForManagedAttach, explicitUserRequest);
+
+ if (useManagedBPForManagedAttach)
+ {
+ if(CORDebuggerAttached() && (g_pEEInterface->GetThread() != NULL))
+ {
+ //
+ // Send a managed-breakpoint.
+ //
+ SendUserBreakpointAndSynchronize(g_pEEInterface->GetThread());
+ }
+ else if (!CORDebuggerAttached() && IsDebuggerPresent())
+ {
+ //
+ // If the registered debugger is not a managed debugger, send a native breakpoint
+ //
+ DebugBreak();
+ }
+ }
+ else if(!useManagedBPForManagedAttach)
+ {
+ //
+ // Send a native breakpoint
+ //
+ DebugBreak();
+ }
+
+ if (!IsDebuggerPresent())
+ {
+ LOG((LF_CORDB, LL_ERROR, "D::LDFU: Failed to launch the debugger.\n"));
+ }
+
+ return S_OK;
+}
+
+
+// The following JDI structures will be passed to a debugger on Vista. Because we do not know when the debugger
+// will be done looking at them, and there is at most one debugger attaching to the process, we always set them
+// once and leave them set without the risk of clobbering something we care about.
+JIT_DEBUG_INFO Debugger::s_DebuggerLaunchJitInfo = {0};
+EXCEPTION_RECORD Debugger::s_DebuggerLaunchJitInfoExceptionRecord = {0};
+CONTEXT Debugger::s_DebuggerLaunchJitInfoContext = {0};
+
+//----------------------------------------------------------------------------
+//
+// InitDebuggerLaunchJitInfo - initialize JDI structure on Vista
+//
+// Arguments:
+//    pThread - the managed thread with the unhandled exception
+// pExceptionInfo - unhandled exception info
+//
+// Return Value:
+// None
+//
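+// Note: the populated JIT_DEBUG_INFO is the structure whose address is later passed to the launched
+// debugger via the registered launch command line (see GetCompleteDebuggerLaunchString), which is
+// presumably how the debugger locates the faulting context and exception record.
+//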
+//----------------------------------------------------------------------------
+void Debugger::InitDebuggerLaunchJitInfo(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((pExceptionInfo != NULL) &&
+ (pExceptionInfo->ContextRecord != NULL) &&
+ (pExceptionInfo->ExceptionRecord != NULL));
+
+ if ((pExceptionInfo == NULL) || (pExceptionInfo->ContextRecord == NULL) || (pExceptionInfo->ExceptionRecord == NULL))
+ {
+ return;
+ }
+
+ s_DebuggerLaunchJitInfoExceptionRecord = *pExceptionInfo->ExceptionRecord;
+ s_DebuggerLaunchJitInfoContext = *pExceptionInfo->ContextRecord;
+
+ s_DebuggerLaunchJitInfo.dwSize = sizeof(s_DebuggerLaunchJitInfo);
+ s_DebuggerLaunchJitInfo.dwThreadID = pThread == NULL ? GetCurrentThreadId() : pThread->GetOSThreadId();
+ s_DebuggerLaunchJitInfo.lpExceptionRecord = reinterpret_cast<ULONG64>(&s_DebuggerLaunchJitInfoExceptionRecord);
+ s_DebuggerLaunchJitInfo.lpContextRecord = reinterpret_cast<ULONG64>(&s_DebuggerLaunchJitInfoContext);
+ s_DebuggerLaunchJitInfo.lpExceptionAddress = s_DebuggerLaunchJitInfoExceptionRecord.ExceptionAddress != NULL ?
+ reinterpret_cast<ULONG64>(s_DebuggerLaunchJitInfoExceptionRecord.ExceptionAddress) :
+ reinterpret_cast<ULONG64>(reinterpret_cast<PVOID>(GetIP(pExceptionInfo->ContextRecord)));
+
+#if defined(_TARGET_X86_)
+ s_DebuggerLaunchJitInfo.dwProcessorArchitecture = PROCESSOR_ARCHITECTURE_INTEL;
+#elif defined(_TARGET_AMD64_)
+ s_DebuggerLaunchJitInfo.dwProcessorArchitecture = PROCESSOR_ARCHITECTURE_AMD64;
+#elif defined(_TARGET_ARM_)
+ s_DebuggerLaunchJitInfo.dwProcessorArchitecture = PROCESSOR_ARCHITECTURE_ARM;
+#elif defined(_TARGET_ARM64_)
+ s_DebuggerLaunchJitInfo.dwProcessorArchitecture = PROCESSOR_ARCHITECTURE_ARM64;
+#else
+#error Unknown processor.
+#endif
+}
+
+
+//----------------------------------------------------------------------------
+//
+// GetDebuggerLaunchJitInfo - retrieve the initialized JDI structure on Vista
+//
+// Arguments:
+// None
+//
+// Return Value:
+// JIT_DEBUG_INFO * - pointer to JDI structure
+//
+//----------------------------------------------------------------------------
+JIT_DEBUG_INFO * Debugger::GetDebuggerLaunchJitInfo(void)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE((s_DebuggerLaunchJitInfo.lpExceptionAddress != NULL) &&
+ (s_DebuggerLaunchJitInfo.lpExceptionRecord != NULL) &&
+ (s_DebuggerLaunchJitInfo.lpContextRecord != NULL) &&
+ (((EXCEPTION_RECORD *)(s_DebuggerLaunchJitInfo.lpExceptionRecord))->ExceptionAddress != NULL));
+
+ return &s_DebuggerLaunchJitInfo;
+}
+#endif // !DACCESS_COMPILE
+
+
+// This function checks the registry for the debug launch setting upon encountering an exception or breakpoint.
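+// On Windows this typically amounts to reading the AeDebug "Auto" and "Debugger" values under HKLM
+// (via GetDebuggerSettingInfoWorker); the exact keys consulted are an implementation detail of that helper.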
+DebuggerLaunchSetting Debugger::GetDbgJITDebugLaunchSetting()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#if FEATURE_PAL
+ DebuggerLaunchSetting setting = DLS_ATTACH_DEBUGGER;
+#else
+ BOOL bAuto = FALSE;
+
+ DebuggerLaunchSetting setting = DLS_ASK_USER;
+
+ DWORD cchDbgFormat = MAX_PATH;
+ INDEBUG(DWORD cchOldDbgFormat = cchDbgFormat);
+
+#if defined(DACCESS_COMPILE)
+ WCHAR * wszDbgFormat = new (nothrow) WCHAR[cchDbgFormat];
+#else
+ WCHAR * wszDbgFormat = new (interopsafe, nothrow) WCHAR[cchDbgFormat];
+#endif // DACCESS_COMPILE
+
+ if (wszDbgFormat == NULL)
+ {
+ return setting;
+ }
+
+ HRESULT hr = GetDebuggerSettingInfoWorker(wszDbgFormat, &cchDbgFormat, &bAuto);
+ while (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
+ {
+ _ASSERTE(cchDbgFormat > cchOldDbgFormat);
+ INDEBUG(cchOldDbgFormat = cchDbgFormat);
+
+#if defined(DACCESS_COMPILE)
+ delete [] wszDbgFormat;
+ wszDbgFormat = new (nothrow) WCHAR[cchDbgFormat];
+#else
+ DeleteInteropSafe(wszDbgFormat);
+ wszDbgFormat = new (interopsafe, nothrow) WCHAR[cchDbgFormat];
+#endif // DACCESS_COMPILE
+
+ if (wszDbgFormat == NULL)
+ {
+ return setting;
+ }
+
+ hr = GetDebuggerSettingInfoWorker(wszDbgFormat, &cchDbgFormat, &bAuto);
+ }
+
+#if defined(DACCESS_COMPILE)
+ delete [] wszDbgFormat;
+#else
+ DeleteInteropSafe(wszDbgFormat);
+#endif // DACCESS_COMPILE
+
+ if (SUCCEEDED(hr) && bAuto)
+ {
+ setting = DLS_ATTACH_DEBUGGER;
+ }
+#endif // FEATURE_PAL
+
+ return setting;
+}
+
+// Returns a bitfield reflecting the managed debugging state at the time of
+// the jit attach.
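+// For example, a return value of (CLR_DEBUGGING_MANAGED_EVENT_PENDING | CLR_DEBUGGING_MANAGED_EVENT_DEBUGGER_LAUNCH)
+// indicates that Debugger.Launch() initiated this jit attach and that a managed event will be sent once the
+// attach completes.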
+CLR_DEBUGGING_PROCESS_FLAGS Debugger::GetAttachStateFlags()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (CLR_DEBUGGING_PROCESS_FLAGS)
+ ((m_attachingForManagedEvent ? CLR_DEBUGGING_MANAGED_EVENT_PENDING : 0)
+ | (m_userRequestedDebuggerLaunch ? CLR_DEBUGGING_MANAGED_EVENT_DEBUGGER_LAUNCH : 0));
+}
+
+#ifndef DACCESS_COMPILE
+//-----------------------------------------------------------------------------
+// Get the full launch string for a jit debugger.
+//
+// If a jit-debugger is registered, then this writes the launch string into pStrArgsBuf
+// and returns true.
+//
+// If no jit-debugger is registered, then this returns false.
+//
+// Throws on error (like OOM).
+//-----------------------------------------------------------------------------
+bool Debugger::GetCompleteDebuggerLaunchString(SString * pStrArgsBuf)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ DWORD pid = GetCurrentProcessId();
+
+ SString ssDebuggerString;
+ GetDebuggerSettingInfo(ssDebuggerString, NULL);
+
+ if (ssDebuggerString.IsEmpty())
+ {
+ // No jit-debugger available. Don't make one up.
+ return false;
+ }
+
+ // There is no security concern in expecting the debug string we retrieve from HKLM to follow a certain
+ // format, because changing HKLM keys requires admin privilege. Padding with zeros is not a security mitigation,
+ // but rather a forward-looking compatibility measure. If future versions of Windows introduce more parameters for
+ // JIT debugger launch, it is preferable to pass zeros rather than random values for those unsupported parameters.
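+ // For illustration only (an assumption; the actual value varies by Windows version and registered debugger):
+ // an AeDebug-style command of the form  "<jit debugger>.exe" -p %ld -e %ld ...  would be expanded here with
+ // the pid, the unmanaged attach event handle, and the JIT_DEBUG_INFO pointer, in whatever order the
+ // registered format string specifies; the trailing zeros merely satisfy any extra format specifiers.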
+ pStrArgsBuf->Printf(ssDebuggerString, pid, GetUnmanagedAttachEvent(), GetDebuggerLaunchJitInfo(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ return true;
+#else // !FEATURE_PAL
+ return false;
+#endif // !FEATURE_PAL
+}
+
+// Proxy code for EDA
+struct EnsureDebuggerAttachedParams
+{
+ Debugger * m_pThis;
+ HRESULT m_retval;
+ PROCESS_INFORMATION * m_pProcessInfo;
+ EnsureDebuggerAttachedParams() :
+ m_pThis(NULL), m_retval(E_FAIL), m_pProcessInfo(NULL) {LIMITED_METHOD_CONTRACT; }
+};
+
+// This is called by the helper thread
+void EDAHelperStub(EnsureDebuggerAttachedParams * p)
+{
+ WRAPPER_NO_CONTRACT;
+
+ p->m_retval = p->m_pThis->EDAHelper(p->m_pProcessInfo);
+}
+
+// This gets called just like the normal version, but it sends the call over to the helper thread
+HRESULT Debugger::EDAHelperProxy(PROCESS_INFORMATION * pProcessInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!ThisIsHelperThreadWorker());
+ _ASSERTE(ThreadHoldsLock());
+
+ HRESULT hr = LazyInitWrapper();
+ if (FAILED(hr))
+ {
+ // We already stress logged this case.
+ return hr;
+ }
+
+
+ if (!IsGuardPageGone())
+ {
+ return EDAHelper(pProcessInfo);
+ }
+
+ EnsureDebuggerAttachedParams p;
+ p.m_pThis = this;
+ p.m_pProcessInfo = pProcessInfo;
+
+ LOG((LF_CORDB, LL_INFO1000000, "D::EDAHelperProxy\n"));
+ m_pRCThread->DoFavor((FAVORCALLBACK) EDAHelperStub, &p);
+ LOG((LF_CORDB, LL_INFO1000000, "D::EDAHelperProxy return\n"));
+
+ return p.m_retval;
+}
+
+// E_ABORT - if the attach was declined
+// S_OK - Jit-attach successfully started
+HRESULT Debugger::EDAHelper(PROCESS_INFORMATION *pProcessInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+
+ PRECONDITION(ThisMaybeHelperThread()); // on helper if stackoverflow.
+ }
+ CONTRACTL_END;
+
+#ifndef FEATURE_PAL
+ LOG((LF_CORDB, LL_INFO10000, "D::EDA: thread 0x%x is launching the debugger.\n", GetCurrentThreadId()));
+
+ _ASSERTE(HasLazyData());
+
+ // Another potential hang. This may get run on the helper if we have a stack overflow.
+ // Hopefully the odds of 1 thread hitting a stack overflow while another is stuck holding the heap
+ // lock are very small.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ BOOL fCreateSucceeded = FALSE;
+
+ StackSString strDbgCommand;
+ const WCHAR * wszDbgCommand = NULL;
+ SString strCurrentDir;
+ const WCHAR * wszCurrentDir = NULL;
+
+ EX_TRY
+ {
+
+ // Get the debugger to launch. The returned string is via the strDbgCommand out param. Throws on error.
+ bool fHasDebugger = GetCompleteDebuggerLaunchString(&strDbgCommand);
+ if (fHasDebugger)
+ {
+ wszDbgCommand = strDbgCommand.GetUnicode();
+ _ASSERTE(wszDbgCommand != NULL); // would have thrown on oom.
+
+ LOG((LF_CORDB, LL_INFO10000, "D::EDA: launching with command [%S]\n", wszDbgCommand));
+
+ ClrGetCurrentDirectory(strCurrentDir);
+ wszCurrentDir = strCurrentDir.GetUnicode();
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ STARTUPINFOW startupInfo = {0};
+ startupInfo.cb = sizeof(STARTUPINFOW);
+
+ DWORD errCreate = 0;
+
+ if (wszDbgCommand != NULL)
+ {
+ // Create the debugger process
+ // When we are launching a debugger, we need to let the child process inherit our handles.
+ // This is necessary for the debugger to signal us that the attach is complete.
+ fCreateSucceeded = WszCreateProcess(NULL, const_cast<WCHAR*> (wszDbgCommand),
+ NULL, NULL,
+ TRUE,
+ CREATE_NEW_CONSOLE,
+ NULL, wszCurrentDir,
+ &startupInfo,
+ pProcessInfo);
+ errCreate = GetLastError();
+ }
+
+ if (!fCreateSucceeded)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::EDA: debugger did not launch successfully.\n"));
+ return E_ABORT;
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "D::EDA: debugger launched successfully.\n"));
+ return S_OK;
+#else // !FEATURE_PAL
+ return E_ABORT;
+#endif // !FEATURE_PAL
+}
+
+// ---------------------------------------------------------------------------------------------------------------------
+// This function decides who wins the race for any jit attach and marks the appropriate state that a jit
+// attach is in progress.
+//
+// Arguments
+// willSendManagedEvent - indicates whether or not we plan to send a managed debug event after the jit attach
+// explicitUserRequest - TRUE if this attach is caused by a call to the Debugger.Launch() API.
+//
+// Returns
+//     TRUE - this is the first thread to jit attach -> this thread should launch the debugger
+//     FALSE - some other thread already has a jit attach in progress -> this thread should block until that is complete
+//
+//
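+// Typical jit-attach sequence (a sketch of how EnsureDebuggerAttached drives these helpers):
+//     if (PreJitAttach(...))                       // TRUE -> this thread won the race
+//     {
+//         LaunchJitDebuggerAndNativeAttach(...);   // spawn the debugger and wait for the native attach
+//         WaitForDebuggerAttach();                 // block until the attach (native + managed) completes
+//         PostJitAttach();                         // clear state and unblock any other waiting threads
+//     }
+//     else
+//     {
+//         WaitForDebuggerAttach();                 // some other thread is doing the attach; just wait
+//     }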
+BOOL Debugger::PreJitAttach(BOOL willSendManagedEvent, BOOL willLaunchDebugger, BOOL explicitUserRequest)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(!ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+ LOG( (LF_CORDB, LL_INFO10000, "D::PreJA: Entering\n") );
+
+ // Multiple threads may be calling this, so need to take the lock.
+ if(!m_jitAttachInProgress)
+ {
+ // TODO: This is a known deadlock! Debugger::PreJitAttach is called during WatsonLastChance.
+ // If the event (exception/crash) happens while this thread is holding the ThreadStore
+ // lock, we may deadlock if another thread holds the DebuggerMutex and is waiting on
+ // the ThreadStore lock. The DebuggerMutex has to be broken into two smaller locks
+ // so that you can take that lock here when holding the ThreadStore lock.
+ DebuggerLockHolder dbgLockHolder(this);
+
+ if (!m_jitAttachInProgress)
+ {
+ m_jitAttachInProgress = TRUE;
+ m_attachingForManagedEvent = willSendManagedEvent;
+ m_launchingDebugger = willLaunchDebugger;
+ m_userRequestedDebuggerLaunch = explicitUserRequest;
+ ResetEvent(GetUnmanagedAttachEvent());
+ ResetEvent(GetAttachEvent());
+ LOG( (LF_CORDB, LL_INFO10000, "D::PreJA: Leaving - first thread\n") );
+ return TRUE;
+ }
+ }
+
+ LOG( (LF_CORDB, LL_INFO10000, "D::PreJA: Leaving - following thread\n") );
+ return FALSE;
+}
+
+//---------------------------------------------------------------------------------------------------------------------
+// This function gets the jit debugger launched and waits for the native attach to complete
+// Make sure you called PreJitAttach and it returned TRUE before you call this
+//
+// Arguments:
+//    pThread - the managed thread with the unhandled exception
+// pExceptionInfo - the unhandled exception info
+//
+// Returns:
+// S_OK if the debugger was launched successfully and a failing HRESULT otherwise
+//
+HRESULT Debugger::LaunchJitDebuggerAndNativeAttach(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(!ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+ // You need to have called PreJitAttach first to determine which thread gets to launch the debugger
+ _ASSERTE(m_jitAttachInProgress);
+
+ LOG( (LF_CORDB, LL_INFO10000, "D::LJDANA: Entering\n") );
+ PROCESS_INFORMATION processInfo = {0};
+ DebuggerLockHolder dbgLockHolder(this);
+
+ // <TODO>
+ // If the JIT debugger failed to launch or if there is no JIT debugger, EDAHelperProxy will
+ // switch to preemptive GC mode to display a dialog to the user indicating the JIT debugger
+ // was unavailable. There are some rare cases where this could cause a deadlock with the
+ // debugger lock; however these are rare enough that fixing this doesn't meet the bar for
+ // Whidbey at this point. We might want to revisit this later however.
+ // </TODO>
+ CONTRACT_VIOLATION(GCViolation);
+
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::EDA: Initialize JDI.\n"));
+
+ EXCEPTION_POINTERS exceptionPointer;
+ EXCEPTION_RECORD exceptionRecord;
+ CONTEXT context;
+
+ if (pExceptionInfo == NULL)
+ {
+ ZeroMemory(&exceptionPointer, sizeof(exceptionPointer));
+ ZeroMemory(&exceptionRecord, sizeof(exceptionRecord));
+ ZeroMemory(&context, sizeof(context));
+
+ context.ContextFlags = CONTEXT_CONTROL;
+ ClrCaptureContext(&context);
+
+ exceptionRecord.ExceptionAddress = reinterpret_cast<PVOID>(GetIP(&context));
+ exceptionPointer.ContextRecord = &context;
+ exceptionPointer.ExceptionRecord = &exceptionRecord;
+
+ pExceptionInfo = &exceptionPointer;
+ }
+
+ InitDebuggerLaunchJitInfo(pThread, pExceptionInfo);
+ }
+
+ // This will make the CreateProcess call to create the debugger process.
+ // We then expect that the debugger process will turn around and attach to us.
+ HRESULT hr = EDAHelperProxy(&processInfo);
+ if(FAILED(hr))
+ {
+ return hr;
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LJDANA: waiting on m_exUnmanagedAttachEvent and debugger's process handle\n"));
+ DWORD dwHandles = 2;
+ HANDLE arrHandles[2];
+ arrHandles[0] = GetUnmanagedAttachEvent();
+ arrHandles[1] = processInfo.hProcess;
+
+ // Let the helper thread do the attach logic for us and wait for the
+ // attach event. Must release the lock before blocking on a wait.
+ dbgLockHolder.Release();
+
+ // Wait for one or the other to be set. Multiple threads could be waiting here.
+ // The events are manual events, so when they go high, all threads will be released.
+ DWORD res = WaitForMultipleObjectsEx(dwHandles, arrHandles, FALSE, INFINITE, FALSE);
+
+ // We no longer need to keep handles to the debugger process.
+ CloseHandle(processInfo.hProcess);
+ CloseHandle(processInfo.hThread);
+
+ // Indicate to the caller that the attach was aborted
+ if (res == WAIT_OBJECT_0 + 1)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::LJDANA: Debugger process is unexpectedly terminated!\n"));
+ return E_FAIL;
+ }
+
+ // Otherwise, attach was successful (Note, only native attach is done so far)
+ _ASSERTE((res == WAIT_OBJECT_0) && "WaitForMultipleObjectsEx failed!");
+ LOG( (LF_CORDB, LL_INFO10000, "D::LJDANA: Leaving\n") );
+ return S_OK;
+
+}
+
+// Blocks until the debugger completes jit attach
+void Debugger::WaitForDebuggerAttach()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ LOG( (LF_CORDB, LL_INFO10000, "D::WFDA:Entering\n") );
+
+ // If this thread previously called LaunchJitDebuggerAndNativeAttach then this wait is spurious:
+ // the event is still set and the wait returns immediately. If this is an auxiliary thread, however,
+ // the wait is necessary.
+ // If we are not launching the debugger (e.g. unhandled exception on Win7), then we should not
+ // wait on the unmanaged attach event. If the debugger is launched by the OS, then the unmanaged
+ // attach event passed to the debugger is created by the OS, not by us, so our event will never
+ // be signaled.
+ if (m_launchingDebugger)
+ {
+ WaitForSingleObject(GetUnmanagedAttachEvent(), INFINITE);
+ }
+
+ // Wait until the pending managed debugger attach is completed
+ if (CORDebuggerPendingAttach() && !CORDebuggerAttached())
+ {
+ LOG( (LF_CORDB, LL_INFO10000, "D::WFDA: Waiting for managed attach too\n") );
+ WaitForSingleObject(GetAttachEvent(), INFINITE);
+ }
+
+ // We can't reset the event here because some threads may
+ // be just about to wait on it. If we reset it before the
+ // other threads hit the wait, they'll block.
+
+ // We have an innate race here that we can't easily fix. The best
+ // we can do is keep the window super small (by moving the reset as
+ // far out as possible), making it very unlikely that a thread will
+ // hit the window.
+
+ LOG( (LF_CORDB, LL_INFO10000, "D::WFDA: Leaving\n") );
+}
+
+// Cleans up after jit attach is complete
+void Debugger::PostJitAttach()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ PRECONDITION(!ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+ LOG( (LF_CORDB, LL_INFO10000, "D::PostJA: Entering\n") );
+ // Multiple threads may be calling this, so need to take the lock.
+ DebuggerLockHolder dbgLockHolder(this);
+
+ // clear the attaching flags which allows other threads to initiate jit attach if needed
+ m_jitAttachInProgress = FALSE;
+ m_attachingForManagedEvent = FALSE;
+ m_launchingDebugger = FALSE;
+ m_userRequestedDebuggerLaunch = FALSE;
+ // set the attaching events to unblock other threads waiting on this attach
+ // regardless of whether or not it completed
+ SetEvent(GetUnmanagedAttachEvent());
+ SetEvent(GetAttachEvent());
+ LOG( (LF_CORDB, LL_INFO10000, "D::PostJA: Leaving\n") );
+}
+
+//---------------------------------------------------------------------------------------
+// Launches a debugger and blocks waiting for it to either attach or abort the attach.
+//
+// Arguments:
+//    pThread - the managed thread with the unhandled exception
+// pExceptionInfo - the unhandled exception info
+// willSendManagedEvent - TRUE if after getting attached we will send a managed debug event
+// explicitUserRequest - TRUE if this attach is caused by a call to the Debugger.Launch() API.
+//
+// Returns:
+// None. Callers can requery if a debugger is attached.
+//
+// Assumptions:
+// This may be called by multiple threads, each firing their own debug events. This function will handle locking.
+// Thus this could block for an arbitrary length of time:
+// - may need to prompt the user to decide if an attach occurs.
+// - may block waiting for a debugger to attach.
+//
+// Notes:
+// The launch string is retrieved from code:GetDebuggerSettingInfo.
+// This will not do a sync-complete. Instead, the caller can send a debug event (the jit-attach
+// event, such as a User-breakpoint or unhandled exception) and that can send a sync-complete,
+// just as if the debugger was always attached. This ensures that the jit-attach event is in the
+// same callback queue as any faked-up events that the Right-side Shim creates.
+//
+void Debugger::JitAttach(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, BOOL willSendManagedEvent, BOOL explicitUserRequest)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(!ThisIsHelperThreadWorker()); // Must be a managed thread
+ }
+ CONTRACTL_END;
+
+ if (IsDebuggerPresent())
+ return;
+
+ GCX_PREEMP_EEINTERFACE_TOGGLE_IFTHREAD();
+
+ EnsureDebuggerAttached(pThread, pExceptionInfo, willSendManagedEvent, explicitUserRequest);
+}
+
+//-----------------------------------------------------------------------------
+// Ensure that a debugger is attached. Will jit-attach if needed.
+//
+// Arguments
+//     pThread - the managed thread with the unhandled exception
+// pExceptionInfo - the unhandled exception info
+// willSendManagedEvent - true if after getting (or staying) attached we will send
+// a managed debug event
+// explicitUserRequest - true if this attach is caused by a call to the
+// Debugger.Launch() API.
+//
+// Returns:
+// None. Either a debugger is attached or it is not.
+//
+// Notes:
+// There are several intermediate possible outcomes:
+// - Debugger already attached before this was called.
+//  - JIT-attach debugger spawned, and attached successfully.
+// - JIT-attach debugger spawned, but declined to attach.
+// - Failed to spawn jit-attach debugger.
+//
+// Ultimately, the only thing that matters at the end is whether a debugger
+//   is now attached, which is retrieved via CORDebuggerAttached().
+//-----------------------------------------------------------------------------
+void Debugger::EnsureDebuggerAttached(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, BOOL willSendManagedEvent, BOOL explicitUserRequest)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(!ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+ LOG( (LF_CORDB,LL_INFO10000,"D::EDA\n") );
+
+ HRESULT hr = S_OK;
+
+ // We could be in three states:
+ // 1) no debugger attached
+ // 2) native attached but not managed (yet?)
+ // 3) native attached and managed
+
+
+ // There is a race condition here that can be hit if multiple threads
+ // were to trigger jit attach at the right time
+ // Thread 1 starts jit attach
+ // Thread 2 also starts jit attach and gets to waiting for the attach complete
+ // Thread 1 rapidly completes the jit attach then starts it again
+ // Thread 2 may still be waiting from the first jit attach at this point
+ //
+ // Note that this isn't all that bad because if the debugger hasn't actually detached
+ // in the middle then the second jit attach will complete almost instantly and thread 2
+ // is unblocked. If the debugger did detach in the middle then it seems reasonable for
+ // thread 2 to continue to wait until the debugger is attached once again for the
+ // second attach. Basically if one jit attach completes and restarts fast enough it might
+ // just go unnoticed by some threads and it will be as if it never happened. Doesn't seem
+ // that bad as long as we know another jit attach is again in progress.
+
+ BOOL startedJitAttach = FALSE;
+
+ // First check to see if we need to launch the debugger ourselves
+ if(PreJitAttach(willSendManagedEvent, TRUE, explicitUserRequest))
+ {
+ // if the debugger is already attached then we can't launch one
+ // and whatever attach state we are in is just what we get
+ if(IsDebuggerPresent())
+ {
+ // unblock other threads waiting on our attach and clean up
+ PostJitAttach();
+ return;
+ }
+ else
+ {
+ hr = LaunchJitDebuggerAndNativeAttach(pThread, pExceptionInfo);
+ if(FAILED(hr))
+ {
+ // unblock other threads waiting on our attach and clean up
+ PostJitAttach();
+ return;
+ }
+ }
+ startedJitAttach = TRUE;
+ }
+
+ // at this point someone should have launched the native debugger and
+ // it is somewhere between not attached and attach complete
+ // (it might have even been completely attached before this function even started)
+ // step 2 - wait for the attach to complete
+ WaitForDebuggerAttach();
+
+ // step 3 - if we initiated then we also cleanup
+ if(startedJitAttach)
+ PostJitAttach();
+ LOG( (LF_CORDB, LL_INFO10000, "D::EDA:Leaving\n") );
+}
+
+
+// Proxy code for AttachDebuggerForBreakpoint
+// Structure used in the proxy function callback
+struct SendExceptionOnHelperThreadParams
+{
+ Debugger *m_pThis;
+ HRESULT m_retval;
+ Thread *m_pThread;
+ OBJECTHANDLE m_exceptionHandle;
+ bool m_continuable;
+ FramePointer m_framePointer;
+ SIZE_T m_nOffset;
+ CorDebugExceptionCallbackType m_eventType;
+ DWORD m_dwFlags;
+
+
+ SendExceptionOnHelperThreadParams() :
+ m_pThis(NULL),
+ m_retval(S_OK),
+ m_pThread(NULL)
+ {LIMITED_METHOD_CONTRACT; }
+};
+
+//**************************************************************************
+// This function sends Exception and ExceptionCallback2 event.
+//
+// Arguments:
+//    pThread : managed thread on which the exception takes place
+// exceptionHandle : handle to the managed exception object (usually
+// something derived from System.Exception)
+// fContinuable : true iff continuable
+// framePointer : frame pointer associated with callback.
+// nOffset : il offset associated with callback.
+// eventType : type of callback
+// dwFlags : additional flags (see CorDebugExceptionFlags).
+//
+// Returns:
+//    S_OK on success. Else some error. May also throw.
+//
+// Notes:
+// This is a helper for code:Debugger.SendExceptionEventsWorker.
+// See code:Debugger.SendException for more details about parameters.
+// This is always called on a managed thread (never the helper thread)
+// This will synchronize and block.
+//**************************************************************************
+HRESULT Debugger::SendExceptionHelperAndBlock(
+ Thread *pThread,
+ OBJECTHANDLE exceptionHandle,
+ bool fContinuable,
+ FramePointer framePointer,
+ SIZE_T nOffset,
+ CorDebugExceptionCallbackType eventType,
+ DWORD dwFlags)
+
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ // This is a normal event to send from LS to RS
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ // This function can be called on helper thread or managed thread.
+ // However, we should be holding locks upon entry
+
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+
+ //
+ // Send pre-Whidbey EXCEPTION IPC event.
+ //
+ InitIPCEvent(ipce, DB_IPCE_EXCEPTION, pThread, pThread->GetDomain());
+
+ ipce->Exception.vmExceptionHandle.SetRawPtr(exceptionHandle);
+ ipce->Exception.firstChance = (eventType == DEBUG_EXCEPTION_FIRST_CHANCE);
+ ipce->Exception.continuable = fContinuable;
+ hr = m_pRCThread->SendIPCEvent();
+
+ _ASSERTE(SUCCEEDED(hr) && "D::SE: Send ExceptionCallback event failed.");
+
+ //
+ // Send Whidbey EXCEPTION IPC event.
+ //
+ InitIPCEvent(ipce, DB_IPCE_EXCEPTION_CALLBACK2, pThread, pThread->GetDomain());
+
+ ipce->ExceptionCallback2.framePointer = framePointer;
+ ipce->ExceptionCallback2.eventType = eventType;
+ ipce->ExceptionCallback2.nOffset = nOffset;
+ ipce->ExceptionCallback2.dwFlags = dwFlags;
+ ipce->ExceptionCallback2.vmExceptionHandle.SetRawPtr(exceptionHandle);
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SE: sending ExceptionCallback2 event"));
+ hr = m_pRCThread->SendIPCEvent();
+
+ if (eventType == DEBUG_EXCEPTION_FIRST_CHANCE)
+ {
+ pThread->GetExceptionState()->GetFlags()->SetSentDebugFirstChance();
+ }
+ else
+ {
+ _ASSERTE(eventType == DEBUG_EXCEPTION_UNHANDLED);
+ }
+
+ _ASSERTE(SUCCEEDED(hr) && "D::SE: Send ExceptionCallback2 event failed.");
+
+ if (SUCCEEDED(hr))
+ {
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+
+ // Let other Runtime threads handle their events.
+ SENDIPCEVENT_END;
+
+ return hr;
+
+}
+
+// Send various first-chance / unhandled exception events.
+//
+// Assumptions:
+// Caller has already determined that we want to send exception events.
+//
+// Notes:
+// This is a helper function for code:Debugger.SendException
+void Debugger::SendExceptionEventsWorker(
+ Thread * pThread,
+ bool fFirstChance,
+ bool fIsInterceptable,
+ bool fContinuable,
+ SIZE_T currentIP,
+ FramePointer framePointer,
+ bool atSafePlace)
+{
+ HRESULT hr = S_OK;
+
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+ //
+ // Figure out parameters to the IPC events.
+ //
+ const BYTE *ip;
+
+ SIZE_T nOffset = (SIZE_T)ICorDebugInfo::NO_MAPPING;
+ DebuggerMethodInfo *pDebugMethodInfo = NULL;
+
+ // If we're passed a zero IP or SP, then go to the ThreadExceptionState on the thread to get the data. Note:
+ // we can only do this if there is a context in the pExState. There are cases (most notably the
+ // EEPolicy::HandleFatalError case) where we don't have that. So we just leave the IP/SP 0.
+ if ((currentIP == 0) && (pExState->GetContextRecord() != NULL))
+ {
+ ip = (BYTE *)GetIP(pExState->GetContextRecord());
+ }
+ else
+ {
+ ip = (BYTE *)currentIP;
+ }
+
+ if (g_pEEInterface->IsManagedNativeCode(ip))
+ {
+
+ MethodDesc *pMethodDesc = g_pEEInterface->GetNativeCodeMethodDesc(PCODE(ip));
+ _ASSERTE(pMethodDesc != NULL);
+
+ if (pMethodDesc != NULL)
+ {
+ DebuggerJitInfo *pDebugJitInfo = GetJitInfo(pMethodDesc, ip, &pDebugMethodInfo);
+
+ if (pDebugJitInfo != NULL)
+ {
+ SIZE_T nativeOffset = CodeRegionInfo::GetCodeRegionInfo(pDebugJitInfo, pMethodDesc).AddressToOffset(ip);
+ CorDebugMappingResult mapResult;
+ DWORD which;
+
+ nOffset = pDebugJitInfo->MapNativeOffsetToIL(nativeOffset, &mapResult, &which);
+ }
+ }
+ }
+
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+
+ if (fFirstChance)
+ {
+ // We can call into this method when there is no exception in progress to alert
+ // the debugger to a stack overflow, however that case should never specify first
+ // chance. An exception must be in progress to check the flags on the exception state
+ _ASSERTE(pThread->IsExceptionInProgress());
+
+ //
+ // Send the first chance exception if we have not already and if it is not suppressed
+ //
+ if (m_sendExceptionsOutsideOfJMC && !pExState->GetFlags()->SentDebugFirstChance())
+ {
+ // Blocking here is especially important so that the debugger can mark any code as JMC.
+ hr = SendExceptionHelperAndBlock(
+ pThread,
+ g_pEEInterface->GetThreadException(pThread),
+ fContinuable,
+ framePointer,
+ nOffset,
+ DEBUG_EXCEPTION_FIRST_CHANCE,
+ fIsInterceptable ? DEBUG_EXCEPTION_CAN_BE_INTERCEPTED : 0);
+
+ {
+ // Toggle GC into COOP to block this thread.
+ GCX_COOP_EEINTERFACE();
+
+ //
+ // If we weren't at a safe place when we enabled PGC, then go ahead and unmark that fact now that we've successfully
+ // disabled.
+ //
+ if (!atSafePlace)
+ {
+ g_pDebugger->DecThreadsAtUnsafePlaces();
+ }
+
+ ProcessAnyPendingEvals(pThread);
+
+ //
+ // If we weren't at a safe place, increment the unsafe count before we enable preemptive mode.
+ //
+ if (!atSafePlace)
+ {
+ g_pDebugger->IncThreadsAtUnsafePlaces();
+ }
+ } // end of GCX_COOP_EEINTERFACE();
+ } //end if (m_sendExceptionsOutsideOfJMC && !SentDebugFirstChance())
+
+ //
+ // If this is a JMC function, then we send a USER's first chance as well.
+ //
+ if ((pDebugMethodInfo != NULL) &&
+ pDebugMethodInfo->IsJMCFunction() &&
+ !pExState->GetFlags()->SentDebugUserFirstChance())
+ {
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ InitIPCEvent(ipce, DB_IPCE_EXCEPTION_CALLBACK2, pThread, pThread->GetDomain());
+
+ ipce->ExceptionCallback2.framePointer = framePointer;
+ ipce->ExceptionCallback2.eventType = DEBUG_EXCEPTION_USER_FIRST_CHANCE;
+ ipce->ExceptionCallback2.nOffset = nOffset;
+ ipce->ExceptionCallback2.dwFlags = fIsInterceptable ? DEBUG_EXCEPTION_CAN_BE_INTERCEPTED : 0;
+ ipce->ExceptionCallback2.vmExceptionHandle.SetRawPtr(g_pEEInterface->GetThreadException(pThread));
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SE: sending ExceptionCallback2 (USER FIRST CHANCE)"));
+ hr = m_pRCThread->SendIPCEvent();
+
+ _ASSERTE(SUCCEEDED(hr) && "D::SE: Send ExceptionCallback2 (User) event failed.");
+
+ if (SUCCEEDED(hr))
+ {
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+
+ pExState->GetFlags()->SetSentDebugUserFirstChance();
+
+ // Let other Runtime threads handle their events.
+ SENDIPCEVENT_END;
+
+ } // end if (!SentDebugUserFirstChance)
+
+ } // end if (firstChance)
+ else
+ {
+ // unhandled exception case
+ // if there is no exception in progress then we are sending a fake exception object
+ // as an indication of a fatal error (stack overflow). In this case it is illegal
+ // to read GetFlags() from the exception state.
+ // else if there is an exception in progress we only want to send the notification if
+ // we did not already send a CHF, previous unhandled, or unwind begin notification
+ BOOL sendNotification = TRUE;
+ if(pThread->IsExceptionInProgress())
+ {
+ sendNotification = !pExState->GetFlags()->DebugCatchHandlerFound() &&
+ !pExState->GetFlags()->SentDebugUnhandled() &&
+ !pExState->GetFlags()->SentDebugUnwindBegin();
+ }
+
+ if(sendNotification)
+ {
+ hr = SendExceptionHelperAndBlock(
+ pThread,
+ g_pEEInterface->GetThreadException(pThread),
+ fContinuable,
+ LEAF_MOST_FRAME,
+ (SIZE_T)ICorDebugInfo::NO_MAPPING,
+ DEBUG_EXCEPTION_UNHANDLED,
+ fIsInterceptable ? DEBUG_EXCEPTION_CAN_BE_INTERCEPTED : 0);
+
+ if(pThread->IsExceptionInProgress())
+ {
+ pExState->GetFlags()->SetSentDebugUnhandled();
+ }
+ }
+
+ } // end if (!firstChance)
+}
+
+//
+// SendException is called by Runtime threads to notify the Right Side that they've hit a managed exception.
+// This may block this thread and suspend the debuggee, and let the debugger inspect us.
+//
+// The thread's throwable should be set so that the debugger can inspect the current exception.
+// It does not report native exceptions in native code (which is consistent because those don't have a
+// managed exception object).
+//
+// This may kick off a jit-attach (in which case fAttaching==true), and so may be called even when no debugger
+// is yet involved.
+//
+// Parameters:
+// pThread - the thread throwing the exception.
+// fFirstChance - true if this is a first chance exception. False if this is an unhandled exception.
+// currentIP - absolute native address of the exception if it is from managed code. If this is 0, we try to find it
+// based off the thread's current exception state.
+// currentSP - stack pointer of the exception. This will get converted into a FramePointer and then used by the debugger
+// to identify which stack frame threw the exception.
+// currentBSP - additional information for IA64 only to identify the stack frame.
+// fContinuable - not used.
+// fAttaching - true iff this exception may initiate a jit-attach. In the common case, if this is true, then
+// CorDebuggerAttached() is false. However, since a debugger can attach at any time, it's possible
+// for another debugger to race against the jit-attach and win. Thus this may err on the side of being true.
+//    fForceNonInterceptable - This is used to determine if the exception is interceptable (i.e. whether we can
+//         handle a DB_IPCE_INTERCEPT_EXCEPTION event for it). If true, then the exception cannot be intercepted.
+//         If false, we get the interception status from the exception properties of the current thread.
+//
+// Returns:
+// S_OK on success (common case by far).
+//    Propagates other errors.
+//
+HRESULT Debugger::SendException(Thread *pThread,
+ bool fFirstChance,
+ SIZE_T currentIP,
+ SIZE_T currentSP,
+ bool fContinuable, // not used by RS.
+ bool fAttaching,
+ bool fForceNonInterceptable,
+ EXCEPTION_POINTERS * pExceptionInfo)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+
+ MODE_ANY;
+
+ PRECONDITION(HasLazyData());
+ PRECONDITION(CheckPointer(pThread));
+ PRECONDITION((pThread->GetFilterContext() == NULL) || !fFirstChance);
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SendException\n"));
+
+ if (CORDBUnrecoverableError(this))
+ {
+ return (E_FAIL);
+ }
+
+ // Mark if we're at an unsafe place.
+ AtSafePlaceHolder unsafePlaceHolder(pThread);
+
+ // Grab the exception name from the current exception object to pass to the JIT attach.
+ bool fIsInterceptable;
+
+ if (fForceNonInterceptable)
+ {
+ fIsInterceptable = false;
+ m_forceNonInterceptable = true;
+ }
+ else
+ {
+ fIsInterceptable = IsInterceptableException(pThread);
+ m_forceNonInterceptable = false;
+ }
+
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+ BOOL managedEventNeeded = ((!fFirstChance) ||
+ (fFirstChance && (!pExState->GetFlags()->SentDebugFirstChance() || !pExState->GetFlags()->SentDebugUserFirstChance())));
+
+ // There must be a managed exception object to send a managed exception event
+ if (g_pEEInterface->IsThreadExceptionNull(pThread) && (pThread->LastThrownObjectHandle() == NULL))
+ {
+ managedEventNeeded = FALSE;
+ }
+
+ if (fAttaching)
+ {
+ JitAttach(pThread, pExceptionInfo, managedEventNeeded, FALSE);
+ // If the jit-attach occurred, CORDebuggerAttached() may now be true and we can
+ // just act as if a debugger was always attached.
+ }
+
+ if(managedEventNeeded)
+ {
+ {
+ // We have to send enabled, so enable now.
+ GCX_PREEMP_EEINTERFACE();
+
+ // Send the exception events. Even in jit-attach case, we should now be fully attached.
+ if (CORDebuggerAttached())
+ {
+ // Initialize frame-pointer associated with exception notification.
+ LPVOID stackPointer;
+ if ((currentSP == 0) && (pExState->GetContextRecord() != NULL))
+ {
+ stackPointer = dac_cast<PTR_VOID>(GetSP(pExState->GetContextRecord()));
+ }
+ else
+ {
+ stackPointer = (LPVOID)currentSP;
+ }
+ FramePointer framePointer = FramePointer::MakeFramePointer(stackPointer);
+
+
+ // Do the real work of sending the events
+ SendExceptionEventsWorker(
+ pThread,
+ fFirstChance,
+ fIsInterceptable,
+ fContinuable,
+ currentIP,
+ framePointer,
+ !unsafePlaceHolder.IsAtUnsafePlace());
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO100, "D:SE: Skipping SendIPCEvent because not supposed to send anything, or RS detached.\n"));
+ }
+ }
+
+ // If we weren't at a safe place when we switched to PREEMPTIVE, then go ahead and unmark that fact now
+ // that we're successfully back in COOPERATIVE mode.
+ unsafePlaceHolder.Clear();
+
+ {
+ GCX_COOP_EEINTERFACE();
+ ProcessAnyPendingEvals(pThread);
+ }
+ }
+
+ if (CORDebuggerAttached())
+ {
+ return S_FALSE;
+ }
+ else
+ {
+ return S_OK;
+ }
+}
+
+
+/*
+ * ProcessAnyPendingEvals
+ *
+ * This function checks for, and then processes, any pending func-evals.
+ *
+ * Parameters:
+ * pThread - The thread to process.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void Debugger::ProcessAnyPendingEvals(Thread *pThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+
+ // If no debugger is attached, then no evals to process.
+ // We may get here in oom situations during jit-attach, so we'll check now and be safe.
+ if (!CORDebuggerAttached())
+ {
+ return;
+ }
+
+ //
+ // Note: if there is a filter context installed, we may need to remove it, do the eval, then put it back. I'm not 100%
+ // sure which yet... it kinda depends on whether or not we really need the filter context updated due to a
+ // collection during the func eval...
+ //
+ // If we need to do a func eval on this thread, then there will be a pending eval registered for this thread. We'll
+ // loop so long as there are pending evals registered. We block in FuncEvalHijackWorker after sending up the
+ // FuncEvalComplete event, so if the user asks for another func eval then there will be a new pending eval when we
+ // loop and check again.
+ //
+ DebuggerPendingFuncEval *pfe;
+
+ while (GetPendingEvals() != NULL && (pfe = GetPendingEvals()->GetPendingEval(pThread)) != NULL)
+ {
+ DebuggerEval *pDE = pfe->pDE;
+
+ _ASSERTE(pDE->m_evalDuringException);
+ _ASSERTE(pDE->m_thread == GetThread());
+
+ // Remove the pending eval from the hash. This ensures that if we take a first chance exception during the eval
+ // that we can do another nested eval properly.
+ GetPendingEvals()->RemovePendingEval(pThread);
+
+ // Go ahead and do the pending func eval. pDE is invalid after this.
+ void *ret;
+ ret = ::FuncEvalHijackWorker(pDE);
+
+
+ // The return value should be NULL when FuncEvalHijackWorker is called as part of an exception.
+ _ASSERTE(ret == NULL);
+ }
+
+ // If we need to re-throw a ThreadAbortException, go ahead and do it now.
+ if (GetThread()->m_StateNC & Thread::TSNC_DebuggerReAbort)
+ {
+ // Now clear the bit else we'll see it again when we process the Exception notification
+ // from this upcoming UserAbort exception.
+ pThread->ResetThreadStateNC(Thread::TSNC_DebuggerReAbort);
+ pThread->UserAbort(Thread::TAR_Thread, EEPolicy::TA_Safe, INFINITE, Thread::UAC_Normal);
+ }
+
+#endif
+
+}
+
+
+/*
+ * FirstChanceManagedException is called by Runtime threads when crawling the managed stack frame
+ * for a handler for the exception. It is called for each managed call on the stack.
+ *
+ * Parameters:
+ * pThread - The thread the exception is occurring on.
+ * currentIP - the IP in the current stack frame.
+ * currentSP - the SP in the current stack frame.
+ *
+ * Returns:
+ * Always FALSE.
+ *
+ */
+bool Debugger::FirstChanceManagedException(Thread *pThread, SIZE_T currentIP, SIZE_T currentSP)
+{
+
+ // @@@
+ // Implements DebugInterface
+ // Can only be called from EE/exception
+ // Must be on a managed thread.
+
+ CONTRACTL
+ {
+ THROWS;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+
+ PRECONDITION(CORDebuggerAttached());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::FCE: First chance exception, TID:0x%x, \n", GetThreadIdHelper(pThread)));
+
+ _ASSERTE(GetThread() != NULL);
+
+#ifdef _DEBUG
+ static ConfigDWORD d_fce;
+ if (d_fce.val(CLRConfig::INTERNAL_D__FCE))
+ _ASSERTE(!"Stop in Debugger::FirstChanceManagedException?");
+#endif
+
+ SendException(pThread, TRUE, currentIP, currentSP, FALSE, FALSE, FALSE, NULL);
+
+ return false;
+}
+
+
+/*
+ * FirstChanceManagedExceptionCatcherFound is called by Runtime threads when crawling the
+ * managed stack frame and a handler for the exception is found.
+ *
+ * Parameters:
+ * pThread - The thread the exception is occurring on.
+ * pTct - Contains the function information that has the catch clause.
+ * pEHClause - Contains the native offset information of the catch clause.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void Debugger::FirstChanceManagedExceptionCatcherFound(Thread *pThread,
+ MethodDesc *pMD, TADDR pMethodAddr,
+ BYTE *currentSP,
+ EE_ILEXCEPTION_CLAUSE *pEHClause)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // Implements DebugInterface
+ // Call by EE/exception. Must be on managed thread
+ _ASSERTE(GetThread() != NULL);
+
+ // Quick check.
+ if (!CORDebuggerAttached())
+ {
+ return;
+ }
+
+ // Compute the offset
+
+ DWORD nOffset = (DWORD)(SIZE_T)ICorDebugInfo::NO_MAPPING;
+ DebuggerMethodInfo *pDebugMethodInfo = NULL;
+ DebuggerJitInfo *pDebugJitInfo = NULL;
+ bool isInJMCFunction = false;
+
+ if (pMD != NULL)
+ {
+ _ASSERTE(!pMD->IsILStub());
+
+ pDebugJitInfo = GetJitInfo(pMD, (const BYTE *) pMethodAddr, &pDebugMethodInfo);
+ if (pDebugMethodInfo != NULL)
+ {
+ isInJMCFunction = pDebugMethodInfo->IsJMCFunction();
+ }
+ }
+
+ // Here we check if the debugger opted out of receiving exception-related events from outside of JMC methods,
+ // or whether this exception has already crossed a JMC frame (in which case we have already sent the user first chance event)
+ if (m_sendExceptionsOutsideOfJMC ||
+ isInJMCFunction ||
+ pThread->GetExceptionState()->GetFlags()->SentDebugUserFirstChance())
+ {
+ if (pDebugJitInfo != NULL)
+ {
+ CorDebugMappingResult mapResult;
+ DWORD which;
+
+ // Map the native instruction to the IL instruction.
+ // Be sure to skip past the prolog on amd64/arm to get the right IL
+ // instruction (on x86 there will not be a prolog as x86 does not use
+ // funclets).
+ nOffset = pDebugJitInfo->MapNativeOffsetToIL(
+ pEHClause->HandlerStartPC,
+ &mapResult,
+ &which,
+ TRUE
+ );
+ }
+
+ bool fIsInterceptable = IsInterceptableException(pThread);
+ m_forceNonInterceptable = false;
+ DWORD dwFlags = fIsInterceptable ? DEBUG_EXCEPTION_CAN_BE_INTERCEPTED : 0;
+
+ FramePointer fp = FramePointer::MakeFramePointer(currentSP);
+ SendCatchHandlerFound(pThread, fp, nOffset, dwFlags);
+ }
+
+ // flag that we catch handler found so that we won't send other mutually exclusive events
+ // such as unwind begin or unhandled
+ pThread->GetExceptionState()->GetFlags()->SetDebugCatchHandlerFound();
+}
+
+// Filter to trigger CHF callback
+// Notify of a catch-handler found callback.
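+// Illustrative usage (a sketch, not the only pattern): code that is about to swallow a managed exception in a
+// stub can invoke this from an SEH filter expression, passing GetExceptionInformation() and the EE Frame that
+// marks the transition, so the debugger still receives a CatchHandlerFound notification; the filter itself
+// always returns EXCEPTION_CONTINUE_SEARCH.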
+LONG Debugger::NotifyOfCHFFilter(EXCEPTION_POINTERS* pExceptionPointers, PVOID pData)
+{
+ CONTRACTL
+ {
+ if ((GetThread() == NULL) || g_pEEInterface->IsThreadExceptionNull(GetThread()))
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ else
+ {
+ THROWS;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SCAN_IGNORE_TRIGGER; // Scan can't handle conditional contracts.
+
+ // @@@
+ // Implements DebugInterface
+ // Can only be called from EE
+
+ // If no debugger is attached, then don't bother sending the events.
+ // This can't kick off a jit-attach.
+ if (!CORDebuggerAttached())
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ //
+ // If this exception has never bubbled thru to managed code, then there is no
+ // useful information for the debugger and, in fact, it may be a completely
+ // internally handled runtime exception, so we should do nothing.
+ //
+ if ((GetThread() == NULL) || g_pEEInterface->IsThreadExceptionNull(GetThread()))
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // Caller must pass in the stack address. This should match up w/ a Frame.
+ BYTE * pCatcherStackAddr = (BYTE*) pData;
+
+ // If we don't have any catcher frame, then use ebp from the context.
+ if (pData == NULL)
+ {
+ pCatcherStackAddr = (BYTE*) GetFP(pExceptionPointers->ContextRecord);
+ }
+ else
+ {
+#ifdef _DEBUG
+ _ASSERTE(pData != NULL);
+ {
+ // We want the CHF stack addr to match w/ the Internal Frame Cordbg sees
+ // in the stacktrace.
+ // The Internal Frame comes from an EE Frame. This means that the CHF stack
+ // addr must match that EE Frame exactly. Let's check that now.
+
+ Frame * pFrame = reinterpret_cast<Frame*>(pData);
+ // Calling a virtual method will enforce that we have a valid Frame. ;)
+ // If we got passed in a random catch address, then when we cast to a Frame
+ // the vtable pointer will be bogus and this call will AV.
+ Frame::ETransitionType e;
+ e = pFrame->GetTransitionType();
+ }
+#endif
+ }
+
+ // @todo - when Stubs-In-Stacktraces is always enabled, remove this.
+ if (!g_EnableSIS)
+ {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // Stubs don't have an IL offset.
+ const SIZE_T offset = (SIZE_T)ICorDebugInfo::NO_MAPPING;
+ Thread *pThread = GetThread();
+ DWORD dwFlags = IsInterceptableException(pThread) ? DEBUG_EXCEPTION_CAN_BE_INTERCEPTED : 0;
+ m_forceNonInterceptable = false;
+
+ FramePointer fp = FramePointer::MakeFramePointer(pCatcherStackAddr);
+
+ //
+ // If we have not sent a first-chance notification, do so now.
+ //
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+
+ if (!pExState->GetFlags()->SentDebugFirstChance())
+ {
+ SendException(pThread,
+ TRUE, // first-chance
+ (SIZE_T)(GetIP(pExceptionPointers->ContextRecord)), // IP
+ (SIZE_T)pCatcherStackAddr, // SP
+ FALSE, // fContinuable
+ FALSE, // attaching
+ TRUE, // ForceNonInterceptable since we are transition stub, the first and last place
+ // that will see this exception.
+ pExceptionPointers);
+ }
+
+ // Here we check if the debugger opted out of receiving exception-related events from outside of JMC methods,
+ // or whether this exception has already crossed a JMC frame (in which case we have already sent the user first chance event)
+ if (m_sendExceptionsOutsideOfJMC || pExState->GetFlags()->SentDebugUserFirstChance())
+ {
+ SendCatchHandlerFound(pThread, fp, offset, dwFlags);
+ }
+
+ // flag that we catch handler found so that we won't send other mutually exclusive events
+ // such as unwind begin or unhandled
+ pExState->GetFlags()->SetDebugCatchHandlerFound();
+
+#ifdef DEBUGGING_SUPPORTED
+
+
+ if ( (pThread != NULL) &&
+ (pThread->IsExceptionInProgress()) &&
+ (pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo()) )
+ {
+ //
+ // The debugger wants to intercept this exception. It may return in a failure case,
+ // in which case we want to continue thru this path.
+ //
+ ClrDebuggerDoUnwindAndIntercept(X86_FIRST_ARG(EXCEPTION_CHAIN_END) pExceptionPointers->ExceptionRecord);
+ }
+#endif // DEBUGGING_SUPPORTED
+
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+
+// Actually send the catch handler found event.
+// This can be used to send CHF for both regular managed catchers as well
+// as stubs that catch (Func-eval, COM-Interop, AppDomains)
+void Debugger::SendCatchHandlerFound(
+ Thread * pThread,
+ FramePointer fp,
+ SIZE_T nOffset,
+ DWORD dwFlags
+)
+{
+
+ CONTRACTL
+ {
+ THROWS;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::FirstChanceManagedExceptionCatcherFound\n"));
+
+ if ((pThread == NULL))
+ {
+ _ASSERTE(!"Bad parameter");
+ LOG((LF_CORDB, LL_INFO10000, "D::FirstChanceManagedExceptionCatcherFound - Bad parameter.\n"));
+ return;
+ }
+
+ if (CORDBUnrecoverableError(this))
+ {
+ return;
+ }
+
+ //
+ // Mark if we're at an unsafe place.
+ //
+ AtSafePlaceHolder unsafePlaceHolder(pThread);
+
+ {
+ GCX_COOP_EEINTERFACE();
+
+ {
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ if (CORDebuggerAttached() &&
+ !pThread->GetExceptionState()->GetFlags()->DebugCatchHandlerFound() &&
+ !pThread->GetExceptionState()->GetFlags()->SentDebugUnhandled() &&
+ !pThread->GetExceptionState()->GetFlags()->SentDebugUnwindBegin())
+ {
+ HRESULT hr;
+
+ //
+ // Figure out parameters to the IPC events.
+ //
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+
+ //
+ // Send Whidbey EXCEPTION IPC event.
+ //
+ InitIPCEvent(ipce, DB_IPCE_EXCEPTION_CALLBACK2, pThread, pThread->GetDomain());
+
+ ipce->ExceptionCallback2.framePointer = fp;
+ ipce->ExceptionCallback2.eventType = DEBUG_EXCEPTION_CATCH_HANDLER_FOUND;
+ ipce->ExceptionCallback2.nOffset = nOffset;
+ ipce->ExceptionCallback2.dwFlags = dwFlags;
+ ipce->ExceptionCallback2.vmExceptionHandle.SetRawPtr(g_pEEInterface->GetThreadException(pThread));
+
+ LOG((LF_CORDB, LL_INFO10000, "D::FCMECF: sending ExceptionCallback2"));
+ hr = m_pRCThread->SendIPCEvent();
+
+ _ASSERTE(SUCCEEDED(hr) && "D::FCMECF: Send ExceptionCallback2 event failed.");
+
+ //
+ // Stop all Runtime threads
+ //
+ TrapAllRuntimeThreads();
+
+ } // end if (CORDebuggerAttached() ...)
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D:FCMECF: Skipping SendIPCEvent because RS detached.\n"));
+ }
+
+ //
+ // Let other Runtime threads handle their events.
+ //
+ SENDIPCEVENT_END;
+ }
+
+ //
+ // If we weren't at a safe place when we enabled PGC, then go ahead and unmark that fact now that we've successfully
+ // disabled.
+ //
+ unsafePlaceHolder.Clear();
+
+ ProcessAnyPendingEvals(pThread);
+ } // end of GCX_COOP_EEINTERFACE();
+
+ return;
+}
+
+/*
+ * ManagedExceptionUnwindBegin is called by Runtime threads when crawling the
+ * managed stack frame and unwinding them.
+ *
+ * Parameters:
+ * pThread - The thread the unwind is occurring on.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void Debugger::ManagedExceptionUnwindBegin(Thread *pThread)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // Implements DebugInterface
+ // Can only be called on managed threads
+ //
+
+ LOG((LF_CORDB, LL_INFO10000, "D::ManagedExceptionUnwindBegin\n"));
+
+ if (pThread == NULL)
+ {
+ _ASSERTE(!"Bad parameter");
+ LOG((LF_CORDB, LL_INFO10000, "D::ManagedExceptionUnwindBegin - Bad parameter.\n"));
+ return;
+ }
+
+ if (CORDBUnrecoverableError(this))
+ {
+ return;
+ }
+
+ //
+ // Mark if we're at an unsafe place.
+ //
+ AtSafePlaceHolder unsafePlaceHolder(pThread);
+ {
+ GCX_COOP_EEINTERFACE();
+
+ {
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ if (CORDebuggerAttached() &&
+ !pThread->GetExceptionState()->GetFlags()->SentDebugUnwindBegin() &&
+ !pThread->GetExceptionState()->GetFlags()->DebugCatchHandlerFound() &&
+ !pThread->GetExceptionState()->GetFlags()->SentDebugUnhandled())
+ {
+ HRESULT hr;
+
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+
+ //
+ // Send Whidbey EXCEPTION IPC event.
+ //
+ InitIPCEvent(ipce, DB_IPCE_EXCEPTION_UNWIND, pThread, pThread->GetDomain());
+
+ ipce->ExceptionUnwind.eventType = DEBUG_EXCEPTION_UNWIND_BEGIN;
+ ipce->ExceptionUnwind.dwFlags = 0;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::MEUB: sending ExceptionUnwind event"));
+ hr = m_pRCThread->SendIPCEvent();
+
+ _ASSERTE(SUCCEEDED(hr) && "D::MEUB: Send ExceptionUnwind event failed.");
+
+ pThread->GetExceptionState()->GetFlags()->SetSentDebugUnwindBegin();
+
+ //
+ // Stop all Runtime threads
+ //
+ TrapAllRuntimeThreads();
+
+ } // end if (CORDebuggerAttached() ...)
+
+ //
+ // Let other Runtime threads handle their events.
+ //
+ SENDIPCEVENT_END;
+ }
+
+ //
+ // If we weren't at a safe place when we enabled PGC, then go ahead and unmark that fact now that we've successfully
+ // disabled.
+ //
+ unsafePlaceHolder.Clear();
+ }
+
+ return;
+}
+
+/*
+ * DeleteInterceptContext
+ *
+ * This function is called by the VM to release any debugger specific information for an
+ * exception object. It is called when the VM releases its internal exception stuff, i.e.
+ * ExInfo on X86 and ExceptionTracker on WIN64.
+ *
+ *
+ * Parameters:
+ * pContext - Debugger specific context.
+ *
+ * Returns:
+ * None.
+ *
+ * Notes:
+ * pContext is just a pointer to a DebuggerContinuableExceptionBreakpoint.
+ *
+ */
+void Debugger::DeleteInterceptContext(void *pContext)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DebuggerContinuableExceptionBreakpoint *pBp = (DebuggerContinuableExceptionBreakpoint *)pContext;
+
+ if (pBp != NULL)
+ {
+ DeleteInteropSafe(pBp);
+ }
+}
+
+
+// Get the frame point for an exception handler
+FramePointer GetHandlerFramePointer(BYTE *pStack)
+{
+ FramePointer handlerFP;
+
+#if !defined(_TARGET_ARM_)
+ // Refer to the comment in DispatchUnwind() to see why we have to add
+ // sizeof(LPVOID) to the handler ebp.
+ handlerFP = FramePointer::MakeFramePointer(LPVOID(pStack + sizeof(void*)));
+#else
+ // ARM is similar to IA64 in that it uses the establisher frame as the
+ // handler. in this case we don't need to add sizeof(void*) to the FP.
+ handlerFP = FramePointer::MakeFramePointer((LPVOID)pStack);
+#endif // _TARGET_ARM_
+
+ return handlerFP;
+}
+
+//
+// ExceptionFilter is called by the Runtime threads when an exception
+// is being processed.
+// - fd - MethodDesc of filter function
+// - pMethodAddr - any address inside of the method. This lets us resolve exactly which version
+// of the method is being executed (for EnC)
+// - offset - native offset to handler.
+// - pStack, pBStore - stack pointers.
+//
+void Debugger::ExceptionFilter(MethodDesc *fd, TADDR pMethodAddr, SIZE_T offset, BYTE *pStack)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(!IsDbgHelperSpecialThread());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO10000, "D::EF: pStack:0x%x MD: %s::%s, offset:0x%x\n",
+ pStack, fd->m_pszDebugClassName, fd->m_pszDebugMethodName, offset));
+
+ //
+ // !!! Need to think through logic for when to step through filter code -
+ // perhaps only during a "step in".
+ //
+
+ //
+ // !!! Eventually there may be some weird mechanics introduced for
+ // returning from the filter that we have to understand. For now we should
+ // be able to proceed normally.
+ //
+
+ FramePointer handlerFP;
+ handlerFP = GetHandlerFramePointer(pStack);
+
+ DebuggerJitInfo * pDJI = NULL;
+ EX_TRY
+ {
+ pDJI = GetJitInfo(fd, (const BYTE *) pMethodAddr);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!fd->IsDynamicMethod() && (pDJI == NULL))
+ {
+ // The only way we shouldn't have a DJI is from a dynamic method or from oom (which the LS doesn't handle).
+ _ASSERTE(!"Debugger doesn't support OOM scenarios.");
+ return;
+ }
+
+ DebuggerController::DispatchUnwind(g_pEEInterface->GetThread(),
+ fd, pDJI, offset, handlerFP, STEP_EXCEPTION_FILTER);
+}
+
+
+//
+// ExceptionHandle is called by Runtime threads when an exception is
+// being handled.
+// - fd - MethodDesc of the method containing the handler
+// - pMethodAddr - any address inside of the method. This lets us resolve exactly which version
+// of the method is being executed (for EnC)
+// - offset - native offset to handler.
+// - pStack - stack pointer.
+//
+void Debugger::ExceptionHandle(MethodDesc *fd, TADDR pMethodAddr, SIZE_T offset, BYTE *pStack)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(!IsDbgHelperSpecialThread());
+ }
+ CONTRACTL_END;
+
+
+ FramePointer handlerFP;
+ handlerFP = GetHandlerFramePointer(pStack);
+
+ DebuggerJitInfo * pDJI = NULL;
+ EX_TRY
+ {
+ pDJI = GetJitInfo(fd, (const BYTE *) pMethodAddr);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if (!fd->IsDynamicMethod() && (pDJI == NULL))
+ {
+ // The only way we shouldn't have a DJI is from a dynamic method or from OOM (which the LS doesn't handle).
+ _ASSERTE(!"Debugger doesn't support OOM scenarios.");
+ return;
+ }
+
+
+ DebuggerController::DispatchUnwind(g_pEEInterface->GetThread(),
+ fd, pDJI, offset, handlerFP, STEP_EXCEPTION_HANDLER);
+}
+
+BOOL Debugger::ShouldAutoAttach()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(!CORDebuggerAttached());
+
+ // We're relying on the caller to determine the
+
+ LOG((LF_CORDB, LL_INFO1000000, "D::SAD\n"));
+
+ // Check if the user has specified a setting in the registry about what he
+ // wants done when an unhandled exception occurs.
+ DebuggerLaunchSetting dls = GetDbgJITDebugLaunchSetting();
+
+ return (dls == DLS_ATTACH_DEBUGGER);
+
+ // @TODO cache the debugger launch setting.
+
+}
+
+BOOL Debugger::FallbackJITAttachPrompt()
+{
+ _ASSERTE(!CORDebuggerAttached());
+ return (ATTACH_YES == this->ShouldAttachDebuggerProxy(false));
+}
+
+void Debugger::MarkDebuggerAttachedInternal()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // Attach is complete now.
+ LOG((LF_CORDB, LL_INFO10000, "D::FEDA: Attach Complete!\n"));
+ g_pEEInterface->MarkDebuggerAttached();
+
+ _ASSERTE(HasLazyData());
+}
+void Debugger::MarkDebuggerUnattachedInternal()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(HasLazyData());
+
+ g_pEEInterface->MarkDebuggerUnattached();
+}
+
+//-----------------------------------------------------------------------------
+// Favor to do lazy initialization on helper thread.
+// This is needed to allow lazy initialization in Stack Overflow scenarios.
+// We may or may not already be initialized.
+//-----------------------------------------------------------------------------
+void LazyInitFavor(void *)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+ Debugger::DebuggerLockHolder dbgLockHolder(g_pDebugger);
+ HRESULT hr;
+ hr = g_pDebugger->LazyInitWrapper();
+ (void)hr; //prevent "unused variable" error from GCC
+
+ // On checked builds, warn that we're hitting a scenario that debugging doesn't support.
+ _ASSERTE(SUCCEEDED(hr) || !"Couldn't initialize lazy data for LastChanceManagedException");
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+LONG Debugger::LastChanceManagedException(EXCEPTION_POINTERS * pExceptionInfo,
+ Thread *pThread,
+ BOOL jitAttachRequested)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // Implements DebugInterface.
+ // Can be run only on managed thread.
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LastChanceManagedException\n"));
+
+ // Don't stop for native debugging anywhere inside our inproc-Filters.
+ CantStopHolder hHolder;
+
+ EXCEPTION_RECORD * pExceptionRecord = pExceptionInfo->ExceptionRecord;
+ CONTEXT * pContext = pExceptionInfo->ContextRecord;
+
+ // You're allowed to call this function with a NULL exception record and context. If you do, then it's assumed
+ // that we want to head right down to asking the user if they want to attach a debugger. No need to try to
+ // dispatch the exception to the debugger controllers. You have to pass NULL for both the exception record and
+ // the context, though. They're a pair. Both have to be NULL, or both have to be valid.
+ _ASSERTE(((pExceptionRecord != NULL) && (pContext != NULL)) ||
+ ((pExceptionRecord == NULL) && (pContext == NULL)));
+
+ if (CORDBUnrecoverableError(this))
+ {
+ return ExceptionContinueSearch;
+ }
+
+ // We don't do anything on the second pass
+ if ((pExceptionRecord != NULL) && ((pExceptionRecord->ExceptionFlags & EXCEPTION_UNWINDING) != 0))
+ {
+ return ExceptionContinueSearch;
+ }
+
+ // Let the controllers have a chance at it - this may be the only handler which can catch the exception if this
+ // is a native patch.
+
+ if ((pThread != NULL) &&
+ (pContext != NULL) &&
+ CORDebuggerAttached() &&
+ DebuggerController::DispatchNativeException(pExceptionRecord,
+ pContext,
+ pExceptionRecord->ExceptionCode,
+ pThread))
+ {
+ return ExceptionContinueExecution;
+ }
+
+ // Otherwise, run our last chance exception logic
+ ATTACH_ACTION action;
+ action = ATTACH_NO;
+
+ if (CORDebuggerAttached() || jitAttachRequested)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::BEH ... debugger attached.\n"));
+
+ Thread *thread = g_pEEInterface->GetThread();
+ _ASSERTE((thread != NULL) && (thread == pThread));
+
+ // ExceptionFlags is 0 for continuable, EXCEPTION_NONCONTINUABLE otherwise. Note that if we don't have an
+ // exception record, then we assume this is a non-continuable exception.
+ bool continuable = (pExceptionRecord != NULL) && (pExceptionRecord->ExceptionFlags == 0);
+
+ LOG((LF_CORDB, LL_INFO10000, "D::BEH ... sending exception.\n"));
+
+ HRESULT hr = E_FAIL;
+
+ // In the jit-attach case, lazy-init. We may be in a stack-overflow, so do it via a favor to avoid
+ // using this thread's stack space.
+ if (jitAttachRequested)
+ {
+ m_pRCThread->DoFavor((FAVORCALLBACK) LazyInitFavor, NULL);
+ }
+
+ // The only way we don't have lazy data at this point is in an OOM scenario, which
+ // the debugger doesn't support.
+ if (!HasLazyData())
+ {
+ return ExceptionContinueSearch;
+ }
+
+
+ // In Whidbey, we used to set the filter CONTEXT when we hit an unhandled exception while doing
+ // mixed-mode debugging. This helps the debugger walk the stack since it can skip the leaf
+ // portion of the stack (including stack frames in the runtime) and start the stackwalk at the
+ // faulting stack frame. The code to set the filter CONTEXT is in a hijack function which is only
+ // used during mixed-mode debugging.
+ if (m_pRCThread->GetDCB()->m_rightSideIsWin32Debugger)
+ {
+ GCX_COOP();
+
+ _ASSERTE(thread->GetFilterContext() == NULL);
+ thread->SetFilterContext(pExceptionInfo->ContextRecord);
+ }
+ EX_TRY
+ {
+ // We pass the attaching status to SendException so that it knows
+ // whether to attach a debugger or not. We should really do the
+ // attach stuff out here and not bother with the flag.
+ hr = SendException(thread,
+ FALSE,
+ ((pContext != NULL) ? (SIZE_T)GetIP(pContext) : NULL),
+ ((pContext != NULL) ? (SIZE_T)GetSP(pContext) : NULL),
+ continuable,
+ !!jitAttachRequested, // If we are JIT attaching on an unhandled exception, we force
+ !!jitAttachRequested, // the exception to be uninterceptable.
+ pExceptionInfo);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ if (m_pRCThread->GetDCB()->m_rightSideIsWin32Debugger)
+ {
+ GCX_COOP();
+
+ thread->SetFilterContext(NULL);
+ }
+ }
+ else
+ {
+ // Note: we don't do anything on NO or TERMINATE. We just return to the exception logic, which will abort the
+ // app or not depending on what the CLR impl decides is appropriate.
+ _ASSERTE(action == ATTACH_TERMINATE || action == ATTACH_NO);
+ }
+
+ return ExceptionContinueSearch;
+}
+
+//
+// NotifyUserOfFault notifies the user of a fault (unhandled exception
+// or user breakpoint) in the process, giving them the option to
+// attach a debugger or terminate the application.
+//
+int Debugger::NotifyUserOfFault(bool userBreakpoint, DebuggerLaunchSetting dls)
+{
+ LOG((LF_CORDB, LL_INFO1000000, "D::NotifyUserOfFault\n"));
+
+ CONTRACTL
+ {
+ NOTHROW;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ int result = IDCANCEL;
+
+ if (!CORDebuggerAttached())
+ {
+ DWORD pid;
+ DWORD tid;
+
+ pid = GetCurrentProcessId();
+ tid = GetCurrentThreadId();
+
+ DWORD flags = 0;
+ UINT resIDMessage = 0;
+
+ if (userBreakpoint)
+ {
+ resIDMessage = IDS_DEBUG_USER_BREAKPOINT_MSG;
+ flags |= MB_ABORTRETRYIGNORE | MB_ICONEXCLAMATION;
+ }
+ else
+ {
+ resIDMessage = IDS_DEBUG_UNHANDLED_EXCEPTION_MSG;
+ flags |= MB_OKCANCEL | MB_ICONEXCLAMATION;
+ }
+
+ {
+ // Another potential hang. This may get run on the helper if we have a stack overflow.
+ // Hopefully the odds of 1 thread hitting a stack overflow while another is stuck holding the heap
+ // lock are very small.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ result = MessageBox(resIDMessage, IDS_DEBUG_SERVICE_CAPTION,
+ flags, TRUE, TRUE, pid, pid, tid, tid);
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO1000000, "D::NotifyUserOfFault left\n"));
+ return result;
+}
+
+
+// Proxy for ShouldAttachDebugger
+struct ShouldAttachDebuggerParams {
+ Debugger* m_pThis;
+ bool m_fIsUserBreakpoint;
+ Debugger::ATTACH_ACTION m_retval;
+};
+
+// This is called by the helper thread
+void ShouldAttachDebuggerStub(ShouldAttachDebuggerParams * p)
+{
+ WRAPPER_NO_CONTRACT;
+
+ p->m_retval = p->m_pThis->ShouldAttachDebugger(p->m_fIsUserBreakpoint);
+}
+
+// This gets called just like the normal version, but it sends the call over to the helper thread
+Debugger::ATTACH_ACTION Debugger::ShouldAttachDebuggerProxy(bool fIsUserBreakpoint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (!HasLazyData())
+ {
+ DebuggerLockHolder lockHolder(this);
+ HRESULT hr = LazyInitWrapper();
+ if (FAILED(hr))
+ {
+ // We already stress logged this case.
+ return ATTACH_NO;
+ }
+ }
+
+
+ if (!IsGuardPageGone())
+ return ShouldAttachDebugger(fIsUserBreakpoint);
+
+ ShouldAttachDebuggerParams p;
+ p.m_pThis = this;
+ p.m_fIsUserBreakpoint = fIsUserBreakpoint;
+
+ LOG((LF_CORDB, LL_INFO1000000, "D::SADProxy\n"));
+ m_pRCThread->DoFavor((FAVORCALLBACK) ShouldAttachDebuggerStub, &p);
+ LOG((LF_CORDB, LL_INFO1000000, "D::SADProxy return %d\n", p.m_retval));
+
+ return p.m_retval;
+}
+
+//---------------------------------------------------------------------------------------
+// Do policy to determine if we should attach a debugger.
+//
+// Arguments:
+// fIsUserBreakpoint - true iff this is in response to a user-breakpoint, else false.
+//
+// Returns:
+// Action to perform based off policy.
+// ATTACH_NO if a debugger is already attached.
+Debugger::ATTACH_ACTION Debugger::ShouldAttachDebugger(bool fIsUserBreakpoint)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+
+ LOG((LF_CORDB, LL_INFO1000000, "D::SAD\n"));
+
+ // If the debugger is already attached, not necessary to re-attach
+ if (CORDebuggerAttached())
+ {
+ return ATTACH_NO;
+ }
+
+ // Check if the user has specified a setting in the registry about what he wants done when an unhandled exception
+ // occurs.
+ DebuggerLaunchSetting dls = GetDbgJITDebugLaunchSetting();
+
+
+ if (dls == DLS_ATTACH_DEBUGGER)
+ {
+ return ATTACH_YES;
+ }
+ else
+ {
+ // Only ask the user once if they wish to attach a debugger. This is because LastChanceManagedException can be called
+ // twice, which causes ShouldAttachDebugger to be called twice, which causes the user to have to answer twice.
+ static BOOL s_fHasAlreadyAsked = FALSE;
+ static ATTACH_ACTION s_action;
+
+
+ // This lock is also part of the above workaround.
+ // Must go to preemptive to take this lock since we'll trigger down the road.
+ GCX_PREEMP();
+ DebuggerLockHolder lockHolder(this);
+
+ // We always want to ask about user breakpoints!
+ if (!s_fHasAlreadyAsked || fIsUserBreakpoint)
+ {
+ if (!fIsUserBreakpoint)
+ s_fHasAlreadyAsked = TRUE;
+
+ // While we could theoretically run into a deadlock if another thread
+ // which acquires the debugger lock in cooperative GC mode is blocked
+ // on this thread while it is running arbitrary user code out of the
+ // MessageBox message pump, given that this codepath will only be used
+ // on Win9x and that the chances of this happening are quite slim,
+ // for Whidbey a GCViolation is acceptable.
+ CONTRACT_VIOLATION(GCViolation);
+
+ // Ask the user if they want to attach
+ int iRes = NotifyUserOfFault(fIsUserBreakpoint, dls);
+
+ // If it's a user-defined breakpoint, they must hit Retry to launch
+ // the debugger. If it's an unhandled exception, user must press
+ // Cancel to attach the debugger.
+ if ((iRes == IDCANCEL) || (iRes == IDRETRY))
+ s_action = ATTACH_YES;
+
+ else if ((iRes == IDABORT) || (iRes == IDOK))
+ s_action = ATTACH_TERMINATE;
+
+ else
+ s_action = ATTACH_NO;
+ }
+
+ // lockHolder goes out of scope - implicit Release
+ return s_action;
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+// SendUserBreakpoint is called by Runtime threads to send that they've hit
+// a user breakpoint to the Right Side.
+//
+// Parameters:
+// thread - managed thread that the breakpoint is on
+//
+// Notes:
+// A user breakpoint is generally triggered by a call to System.Diagnostics.Debugger.Break.
+// This can be very common. VB's 'stop' statement compiles to a Debugger.Break call.
+// Some other CLR facilities (MDAs) may call this directly too.
+//
+// This may trigger a Jit attach.
+// If the debugger is already attached, this will issue a step-out so that the UserBreakpoint
+// appears to come from the callsite.
+void Debugger::SendUserBreakpoint(Thread * thread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+
+ PRECONDITION(thread != NULL);
+ PRECONDITION(thread == ::GetThread());
+ }
+ CONTRACTL_END;
+
+
+#ifdef _DEBUG
+ // For testing Watson, we want a consistent way to be able to generate a
+ // Fatal Execution Error (FEE).
+ // So we have a debug-only knob in this particular managed call that can be used
+ // to artificially inject the error.
+ // This is only for testing.
+ static int fDbgInjectFEE = -1;
+
+ if (fDbgInjectFEE == -1)
+ fDbgInjectFEE = UnsafeGetConfigDWORD(CLRConfig::INTERNAL_DbgInjectFEE);
+
+ if (fDbgInjectFEE)
+ {
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "Debugger posting bogus FEE b/c knob DbgInjectFEE is set.\n");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ // These never return.
+ }
+#endif
+
+ if (CORDBUnrecoverableError(this))
+ {
+ return;
+ }
+
+ // UserBreakpoint behaves differently if we're under a debugger vs. a jit-attach.
+ // If we're under the debugger, it does an additional step-out to get us back to the call site.
+
+ // If already attached, then do a step-out and send the userbreak event.
+ if (CORDebuggerAttached())
+ {
+ // A debugger is already attached, so set up a DebuggerUserBreakpoint controller to get us out of the helper
+ // that got us here. The DebuggerUserBreakpoint will call AttachDebuggerForBreakpoint for us when we're out
+ // of the helper. The controller will delete itself when it's done its work.
+ DebuggerUserBreakpoint::HandleDebugBreak(thread);
+ return;
+ }
+
+ ATTACH_ACTION dbgAction = ShouldAttachDebugger(true);
+
+ // No debugger is attached. Consider a JIT attach.
+ // This will do ShouldAttachDebugger() and wait for the results.
+ // - It may terminate if the user requested that.
+ // - It may do a full jit-attach.
+ if (dbgAction == ATTACH_YES)
+ {
+ JitAttach(thread, NULL, TRUE, FALSE);
+ }
+ else if (dbgAction == ATTACH_TERMINATE)
+ {
+ // ATTACH_TERMINATE indicates that the user wants to terminate the app.
+ LOG((LF_CORDB, LL_INFO10000, "D::SUB: terminating this process due to user request\n"));
+
+ // Should this go through the host?
+ TerminateProcess(GetCurrentProcess(), 0);
+ _ASSERTE(!"Should never reach this point.");
+ }
+ else
+ {
+ _ASSERTE(dbgAction == ATTACH_NO);
+ }
+
+ if (CORDebuggerAttached())
+ {
+ // On jit-attach, we just send the UserBreak event. Don't do an extra step-out.
+ SendUserBreakpointAndSynchronize(thread);
+ }
+ else if (IsDebuggerPresent())
+ {
+ DebugBreak();
+ }
+}
+
+
+// void Debugger::ThreadCreated(): ThreadCreated is called when
+// a new Runtime thread has been created, but before its ever seen
+// managed code. This is a callback invoked by the EE into the Debugger.
+// This will create a DebuggerThreadStarter patch, which will set
+// a patch at the first instruction in the managed code. When we hit
+// that patch, the DebuggerThreadStarter will invoke ThreadStarted, below.
+//
+// Thread* pRuntimeThread: The EE Thread object representing the
+// runtime thread that has just been created.
+void Debugger::ThreadCreated(Thread* pRuntimeThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // This function implements the DebugInterface. But it is also called from Attach
+ // logic internally.
+ //
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO100, "D::TC: thread created for 0x%x. ******\n",
+ GetThreadIdHelper(pRuntimeThread)));
+
+ // Sanity check the thread.
+ _ASSERTE(pRuntimeThread != NULL);
+ _ASSERTE(pRuntimeThread->GetThreadId() != 0);
+
+
+ // Create a thread starter and enable its WillEnterManaged code
+ // callback. This will cause the starter to trigger once the
+ // thread has hit managed code, which will cause
+ // Debugger::ThreadStarted() to be called. NOTE: the starter will
+ // be deleted automatically when it's done its work.
+ DebuggerThreadStarter *starter = new (interopsafe, nothrow) DebuggerThreadStarter(pRuntimeThread);
+
+ if (starter == NULL)
+ {
+ CORDBDebuggerSetUnrecoverableWin32Error(this, 0, false);
+ return;
+ }
+
+ starter->EnableTraceCall(LEAF_MOST_FRAME);
+}
+
+
+// void Debugger::ThreadStarted(): ThreadStarted is called when
+// a new Runtime thread has reached its first managed code. This is
+// called by the DebuggerThreadStarter patch's SendEvent method.
+//
+// Thread* pRuntimeThread: The EE Thread object representing the
+// runtime thread that has just hit managed code.
+void Debugger::ThreadStarted(Thread* pRuntimeThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // This method implemented DebugInterface but it is also called from Controller
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO100, "D::TS: thread attach : ID=%#x AD:%#x\n",
+ GetThreadIdHelper(pRuntimeThread), pRuntimeThread->GetDomain()));
+
+ // We just need to send a VMPTR_Thread. The RS will get everything else it needs from DAC.
+ //
+
+ _ASSERTE((g_pEEInterface->GetThread() &&
+ !g_pEEInterface->GetThread()->m_fPreemptiveGCDisabled) ||
+ g_fInControlC);
+ _ASSERTE(ThreadHoldsLock());
+
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_THREAD_ATTACH,
+ pRuntimeThread,
+ pRuntimeThread->GetDomain());
+
+
+ m_pRCThread->SendIPCEvent();
+
+ //
+ // Well, if this thread got created _after_ we started sync'ing
+ // then its Runtime thread flags don't reflect the fact that there
+ // is a debug suspend pending. We need to call over to the
+ // Runtime and set the flag in the thread now...
+ //
+ if (m_trappingRuntimeThreads)
+ {
+ g_pEEInterface->MarkThreadForDebugSuspend(pRuntimeThread);
+ }
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// DetachThread is called by Runtime threads when they are completing
+// their execution and about to be destroyed.
+//
+// Arguments:
+// pRuntimeThread - Pointer to the runtime's thread object to detach.
+//
+// Return Value:
+// None
+//
+//---------------------------------------------------------------------------------------
+void Debugger::DetachThread(Thread *pRuntimeThread)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ {
+ return;
+ }
+
+ if (m_ignoreThreadDetach)
+ {
+ return;
+ }
+
+ _ASSERTE (pRuntimeThread != NULL);
+
+
+ LOG((LF_CORDB, LL_INFO100, "D::DT: thread detach : ID=%#x AD:%#x.\n",
+ GetThreadIdHelper(pRuntimeThread), pRuntimeThread->GetDomain()));
+
+
+ // We may be killing a thread before the Thread-starter fired.
+ // So check (and cancel) any outstanding thread-starters.
+ // If we don't, this old thread starter may conflict w/ a new thread-starter
+ // if AppDomains or EE Threads get recycled.
+ DebuggerController::CancelOutstandingThreadStarter(pRuntimeThread);
+
+ // Controller lock is bigger than debugger lock.
+ // Don't take debugger lock before the CancelOutstandingThreadStarter function.
+ SENDIPCEVENT_BEGIN(this, pRuntimeThread);
+
+ if (CORDebuggerAttached())
+ {
+ // Send a detach thread event to the Right Side.
+ DebuggerIPCEvent * pEvent = m_pRCThread->GetIPCEventSendBuffer();
+
+ InitIPCEvent(pEvent,
+ DB_IPCE_THREAD_DETACH,
+ pRuntimeThread,
+ pRuntimeThread->GetDomain());
+
+ m_pRCThread->SendIPCEvent();
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+
+ // This prevents a race condition where we blocked on the Lock()
+ // above while another thread was sending an event. While we
+ // were blocked, the debugger suspended us, and so we wouldn't be
+ // resumed after the suspension that is about to happen below.
+ pRuntimeThread->ResetThreadStateNC(Thread::TSNC_DebuggerUserSuspend);
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::DT: Skipping SendIPCEvent because RS detached."));
+ }
+
+ SENDIPCEVENT_END;
+}
+
+
+//
+// SuspendComplete is called when the last Runtime thread reaches a safe point in response to having its trap flags set.
+// This may be called on either the real helper thread or someone doing helper thread duty.
+//
+BOOL Debugger::SuspendComplete()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+
+ // This is conceptually mode-cooperative.
+ // But we haven't marked the runtime as stopped yet (m_stopped), so the contract
+ // subsystem doesn't realize it yet.
+ DISABLED(MODE_COOPERATIVE);
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // Called from RCThread::MainLoop and TemporaryHelperThreadMainLoop
+ // when all threads are suspended. Can happen on a managed thread or the helper thread.
+ // If it happens on a managed thread, that thread must be doing helper thread duty.
+ //
+
+ _ASSERTE(ThreadStore::HoldingThreadStore() || g_fProcessDetach);
+
+ // We should be holding debugger lock m_mutex.
+ _ASSERTE(ThreadHoldsLock());
+
+ // We can't throw here (we're in the middle of the runtime suspension logic).
+ // But things below us throw. So we catch the exception, but then what state are we in?
+
+ _ASSERTE((!g_pEEInterface->GetThread() || !g_pEEInterface->GetThread()->m_fPreemptiveGCDisabled) || g_fInControlC);
+ _ASSERTE(ThisIsHelperThreadWorker());
+
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "D::SC: suspension complete\n");
+
+ // We have suspended runtime.
+
+ // We're stopped now. Marking m_stopped allows us to use MODE_COOPERATIVE contracts.
+ _ASSERTE(!m_stopped && m_trappingRuntimeThreads);
+ m_stopped = true;
+
+
+ // Send the sync complete event to the Right Side.
+ {
+ // If we fail to send the SyncComplete, what do we do?
+ CONTRACT_VIOLATION(ThrowsViolation);
+
+ SendSyncCompleteIPCEvent(); // sets m_stopped = true...
+ }
+
+ // Everything in the next scope is meant to mimic what we do in UnlockForEventSending, minus EnableEventHandling.
+ // We do the EEH part when we get the Continue event.
+ {
+#ifdef _DEBUG
+ //_ASSERTE(m_tidLockedForEventSending == GetCurrentThreadId());
+ m_tidLockedForEventSending = 0;
+#endif
+
+ //
+ // Event handling is re-enabled by the RCThread in response to a
+ // continue message from the Right Side.
+
+ }
+
+ // @todo - what should we do if this function failed?
+ return TRUE;
+}
+
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Debugger::SendCreateAppDomainEvent - notify the RS of an AppDomain
+//
+// Arguments:
+// pRuntimeAppdomain - pointer to the AppDomain
+//
+// Return Value:
+// None
+//
+// Notes:
+// This is used to notify the debugger of either a newly created
+// AppDomain (when fAttaching is FALSE) or of existing AppDomains
+// at attach time (fAttaching is TRUE). In both cases, this should
+// be called before any LoadModule/LoadAssembly events are sent for
+// this domain. Otherwise the RS will get an event for an AppDomain
+// it doesn't recognize and ASSERT.
+//
+// For the non-attach case this means there is no need to enumerate
+// the assemblies/modules in an AppDomain after sending this event
+// because we know there won't be any.
+//
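+// Illustrative event ordering for a new AppDomain (a sketch based on the events sent in
+// this file, not an exhaustive protocol description):
+//
+//     DB_IPCE_CREATE_APP_DOMAIN   <- sent by this function
+//     DB_IPCE_LOAD_ASSEMBLY       <- sent later, per assembly (see Debugger::LoadAssembly)
+//     DB_IPCE_LOAD_MODULE         <- sent later, per module (see Debugger::LoadModule)
+//
+// so the RS never receives an assembly or module event for an AppDomain it has not seen.
+//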
+
+void Debugger::SendCreateAppDomainEvent(AppDomain * pRuntimeAppDomain)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ {
+ return;
+ }
+
+ STRESS_LOG2(LF_CORDB, LL_INFO10000, "D::SCADE: AppDomain creation:%#08x, %#08x\n",
+ pRuntimeAppDomain, pRuntimeAppDomain->GetId().m_dwId);
+
+
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+
+
+ // We may have detached while waiting in LockForEventSending,
+ // in which case we can't send the event.
+ if (CORDebuggerAttached())
+ {
+ // Send a create appdomain event to the Right Side.
+ DebuggerIPCEvent * pEvent = m_pRCThread->GetIPCEventSendBuffer();
+
+ InitIPCEvent(pEvent,
+ DB_IPCE_CREATE_APP_DOMAIN,
+ pThread,
+ pRuntimeAppDomain);
+
+ // Only send a pointer to the AppDomain, the RS will get everything else via DAC.
+ pEvent->AppDomainData.vmAppDomain.SetRawPtr(pRuntimeAppDomain);
+ m_pRCThread->SendIPCEvent();
+
+ TrapAllRuntimeThreads();
+ }
+
+ // Let other Runtime threads handle their events.
+ SENDIPCEVENT_END;
+
+}
+
+
+
+
+//
+// SendExitAppDomainEvent is called when an app domain is destroyed.
+//
+void Debugger::SendExitAppDomainEvent(AppDomain* pRuntimeAppDomain)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO100, "D::EAD: Exit AppDomain 0x%08x.\n",
+ pRuntimeAppDomain));
+
+ STRESS_LOG3(LF_CORDB, LL_INFO10000, "D::EAD: AppDomain exit:%#08x, %#08x, %#08x\n",
+ pRuntimeAppDomain, pRuntimeAppDomain->GetId().m_dwId, CORDebuggerAttached());
+
+ Thread *thread = g_pEEInterface->GetThread();
+ // Prevent other Runtime threads from handling events.
+ SENDIPCEVENT_BEGIN(this, thread);
+
+ if (CORDebuggerAttached())
+ {
+ if (pRuntimeAppDomain->IsDefaultDomain() )
+ {
+ // The Debugger expects to never get an unload event for the default Domain.
+ // Currently we should never get here because g_fProcessDetach will be true by
+ // the time this method is called. However, we'd like to know if this ever changes
+ _ASSERTE(!"Trying to deliver notification of unload for default domain" );
+ return;
+ }
+
+ // Send the exit appdomain event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_EXIT_APP_DOMAIN,
+ thread,
+ pRuntimeAppDomain);
+ m_pRCThread->SendIPCEvent();
+
+ // Delete any left over modules for this appdomain.
+ // Note that we're doing this under the lock.
+ if (m_pModules != NULL)
+ {
+ DebuggerDataLockHolder ch(this);
+ m_pModules->RemoveModules(pRuntimeAppDomain);
+ }
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::EAD: Skipping SendIPCEvent because RS detached."));
+ }
+
+ SENDIPCEVENT_END;
+}
+
+
+
+//
+// LoadAssembly is called when a new Assembly gets loaded.
+//
+void Debugger::LoadAssembly(DomainAssembly * pDomainAssembly)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO100, "D::LA: Load Assembly Asy:0x%p AD:0x%p which:%ls\n",
+ pDomainAssembly, pDomainAssembly->GetAppDomain(), pDomainAssembly->GetAssembly()->GetDebugName() ));
+
+ if (!CORDebuggerAttached())
+ {
+ return;
+ }
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, pThread)
+
+
+ if (CORDebuggerAttached())
+ {
+ // Send a load assembly event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_LOAD_ASSEMBLY,
+ pThread,
+ pDomainAssembly->GetAppDomain());
+
+ ipce->AssemblyData.vmDomainAssembly.SetRawPtr(pDomainAssembly);
+
+ m_pRCThread->SendIPCEvent();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::LA: Skipping SendIPCEvent because RS detached."));
+ }
+
+ // Stop all Runtime threads
+ if (CORDebuggerAttached())
+ {
+ TrapAllRuntimeThreads();
+ }
+
+ SENDIPCEVENT_END;
+}
+
+
+
+//
+// UnloadAssembly is called when a Runtime thread unloads an assembly.
+//
+void Debugger::UnloadAssembly(DomainAssembly * pDomainAssembly)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO100, "D::UA: Unload Assembly Asy:0x%p AD:0x%p which:%ls\n",
+ pDomainAssembly, pDomainAssembly->GetAppDomain(), pDomainAssembly->GetAssembly()->GetDebugName() ));
+
+ Thread *thread = g_pEEInterface->GetThread();
+ // Note that the debugger lock is reentrant, so we may or may not hold it already.
+ SENDIPCEVENT_BEGIN(this, thread);
+
+ // Send the unload assembly event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+
+ InitIPCEvent(ipce,
+ DB_IPCE_UNLOAD_ASSEMBLY,
+ thread,
+ pDomainAssembly->GetAppDomain());
+ ipce->AssemblyData.vmDomainAssembly.SetRawPtr(pDomainAssembly);
+
+ SendSimpleIPCEventAndBlock();
+
+ // This will block on the continue
+ SENDIPCEVENT_END;
+
+}
+
+
+
+
+//
+// LoadModule is called when a Runtime thread loads a new module and a debugger
+// is attached. This also includes when a domain-neutral module is "loaded" into
+// a new domain.
+//
+// TODO: remove pszModuleName and perhaps other args.
+void Debugger::LoadModule(Module* pRuntimeModule,
+ LPCWSTR pszModuleName, // module file name.
+ DWORD dwModuleName, // length of pszModuleName in chars, not including null.
+ Assembly *pAssembly,
+ AppDomain *pAppDomain,
+ DomainFile * pDomainFile,
+ BOOL fAttaching)
+{
+
+ CONTRACTL
+ {
+ NOTHROW; // not protected for Throws.
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ // @@@@
+ // Implements DebugInterface, but can be called internally as well.
+ // This can be called by the EE when loading a module, or when we are attaching (called by IteratingAppDomainForAttaching).
+ //
+ _ASSERTE(!fAttaching);
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ // If this is a dynamic module, then it's part of a multi-module assembly. The manifest
+ // module within the assembly contains metadata for all the module names in the assembly.
+ // When a new dynamic module is created, the manifest module's metadata is updated to
+ // include the new module (see code:Assembly.CreateDynamicModule).
+ // So we need to update the RS's copy of the metadata. One place the manifest module's
+ // metadata gets used is in code:DacDbiInterfaceImpl.GetModuleSimpleName
+ //
+ // See code:ReflectionModule.CaptureModuleMetaDataToMemory for why we send the metadata-refresh here.
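+ // A sketch of the sequence implemented below (assuming a reflection-emit assembly with a
+ // separate manifest module; names are the ones used in this block):
+ //   1. A new dynamic module was emitted, so the manifest module's metadata has grown.
+ //   2. Look up the manifest module's DomainFile for this AppDomain.
+ //   3. Raise DB_IPCE_METADATA_UPDATE so the RS discards its cached copy of the manifest
+ //      module's metadata and re-fetches it out-of-process when needed.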
+ if (pRuntimeModule->IsReflection() && !pRuntimeModule->IsManifest() && !fAttaching)
+ {
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ // The loader lookups may throw or toggle GC mode, so do them inside a TRY/Catch and
+ // outside any debugger locks.
+ Module * pManifestModule = pRuntimeModule->GetAssembly()->GetManifestModule();
+
+ _ASSERTE(pManifestModule != pRuntimeModule);
+ _ASSERTE(pManifestModule->IsManifest());
+ _ASSERTE(pManifestModule->GetAssembly() == pRuntimeModule->GetAssembly());
+
+ DomainFile * pManifestDomainFile = pManifestModule->GetDomainFile(pAppDomain);
+
+ DebuggerLockHolder dbgLockHolder(this);
+
+ // Raise the debug event.
+ // This still tells the debugger that the manifest module metadata is invalid and needs to
+ // be refreshed.
+ DebuggerIPCEvent eventMetadataUpdate;
+ InitIPCEvent(&eventMetadataUpdate, DB_IPCE_METADATA_UPDATE, NULL, pAppDomain);
+
+ eventMetadataUpdate.MetadataUpdateData.vmDomainFile.SetRawPtr(pManifestDomainFile);
+
+ SendRawEvent(&eventMetadataUpdate);
+ }
+ EX_CATCH_HRESULT(hr);
+ SIMPLIFYING_ASSUMPTION_SUCCEEDED(hr);
+ }
+
+
+ DebuggerModule * module = NULL;
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+
+#ifdef FEATURE_FUSION
+ // Fix for issue Whidbey - 106398
+ // Populate the pdb to fusion cache.
+
+ //
+ if (pRuntimeModule->IsIStream() == FALSE)
+ {
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ HRESULT hrCopy = S_OK;
+ EX_TRY
+ {
+ pRuntimeModule->FusionCopyPDBs(pRuntimeModule->GetPath());
+ }
+ EX_CATCH_HRESULT(hrCopy); // ignore failures
+ }
+#endif // FEATURE_FUSION
+
+ DebuggerIPCEvent* ipce = NULL;
+
+ // Don't create new record if already loaded. We do still want to send the ModuleLoad event, however.
+ // The RS has logic to ignore duplicate ModuleLoad events. We have to send what could possibly be a dup, though,
+ // due to some really nasty issues with getting proper assembly and module load events from the loader when dealing
+ // with shared assemblies.
+ module = LookupOrCreateModule(pDomainFile);
+ _ASSERTE(module != NULL);
+
+
+ // During a real LoadModule event, debugger can change jit flags.
+ // Can't do this during a fake event sent on attach.
+ // This is cleared after we send the LoadModule event.
+ module->SetCanChangeJitFlags(true);
+
+
+ // @dbgtodo inspection - Check whether the DomainFile we get is consistent with the Module and AppDomain we get.
+ // We should simplify things when we actually get rid of DebuggerModule, possibly by just passing the
+ // DomainFile around.
+ _ASSERTE(module->GetDomainFile() == pDomainFile);
+ _ASSERTE(module->GetAppDomain() == pDomainFile->GetAppDomain());
+ _ASSERTE(module->GetRuntimeModule() == pDomainFile->GetModule());
+
+ // Send a load module event to the Right Side.
+ ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,DB_IPCE_LOAD_MODULE, pThread, pAppDomain);
+
+ ipce->LoadModuleData.vmDomainFile.SetRawPtr(pDomainFile);
+
+ m_pRCThread->SendIPCEvent();
+
+ {
+ // Stop all Runtime threads
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ TrapAllRuntimeThreads();
+ }
+ EX_CATCH_HRESULT(hr); // @dbgtodo synchronization - catch exception and go on to restore state.
+ // Synchronization feature crew needs to figure out what happens to TrapAllRuntimeThreads().
+ }
+
+ SENDIPCEVENT_END;
+
+ // Need to update the PDB stream for the SQL case where the PDB was passed in as a stream,
+ // regardless of whether we are attaching or not.
+ //
+ if (pRuntimeModule->IsIStream())
+ {
+ // Just ignore failures. Caller was just sending a debug event and we don't
+ // want that to interfere with non-debugging functionality.
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ SendUpdateModuleSymsEventAndBlock(pRuntimeModule, pAppDomain);
+ }
+ EX_CATCH_HRESULT(hr);
+ }
+
+ // Now that we're done with the load module event, can no longer change Jit flags.
+ module->SetCanChangeJitFlags(false);
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Special LS-only notification that a module has reached the FILE_LOADED level. For now
+// this is only useful to bind breakpoints in generic instantiations from NGENd modules
+// that we couldn't bind earlier (at LoadModule notification time) because the method
+// iterator refuses to consider modules earlier than the FILE_LOADED level. Normally
+// generic instantiations would have their breakpoints bound when they get JITted, but in
+// the case of NGEN that may never happen, so we need to bind them here.
+//
+// Arguments:
+// * pRuntimeModule - Module that just loaded
+// * pAppDomain - AD into which the Module was loaded
+//
+// Assumptions:
+// This is called during the loading process, and blocks that process from
+// completing. The module has reached the FILE_LOADED stage, but typically not yet
+// the IsReadyForTypeLoad stage.
+//
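+// Rough shape of the binding pass below (a sketch of the loop in this function, using the
+// IL master/slave patch terminology from the controller code):
+//
+//     for each IL master patch M in the patch table:
+//         for each DebuggerJitInfo dji of M's method in (pAppDomain, pRuntimeModule):
+//             if no IL slave patch of M's controller already exists for dji:
+//                 M.controller->AddBindAndActivateILSlavePatch(M, dji)
+//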
+
+void Debugger::LoadModuleFinished(Module * pRuntimeModule, AppDomain * pAppDomain)
+{
+ CONTRACTL
+ {
+ SUPPORTS_DAC;
+ STANDARD_VM_CHECK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pRuntimeModule != NULL);
+ _ASSERTE(pAppDomain != NULL);
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ // Just as an optimization, skip binding breakpoints if there's no debugger attached.
+ // If a debugger attaches at some point after here, it will be able to bind patches
+ // by making the request at that time. If a debugger detaches at some point after
+ // here, there's no harm in having extra patches bound.
+ if (!CORDebuggerAttached())
+ return;
+
+ // For now, this notification only does interesting work if the module that loaded is
+ // an NGENd module, because all we care about in this notification is ensuring NGENd
+ // methods get breakpoints bound on them
+ if (!pRuntimeModule->HasNativeImage())
+ return;
+
+ // This notification is called just before MODULE_READY_FOR_TYPELOAD gets set. But
+ // for shared modules (loaded into multiple domains), MODULE_READY_FOR_TYPELOAD has
+ // already been set if this module was already loaded into an earlier domain. For
+ // such cases, there's no need to bind breakpoints now because the module has already
+ // been fully loaded into at least one domain, and breakpoint binding has already
+ // been done for us
+ if (pRuntimeModule->IsReadyForTypeLoad())
+ return;
+
+#ifdef _DEBUG
+ {
+ // This notification is called once the module is loaded
+ DomainFile * pDomainFile = pRuntimeModule->FindDomainFile(pAppDomain);
+ _ASSERTE((pDomainFile != NULL) && (pDomainFile->GetLoadLevel() >= FILE_LOADED));
+ }
+#endif // _DEBUG
+
+ // Find all IL Master patches for this module, and bind & activate their
+ // corresponding slave patches.
+ {
+ DebuggerController::ControllerLockHolder ch;
+
+ HASHFIND info;
+ DebuggerPatchTable * pTable = DebuggerController::GetPatchTable();
+
+ for (DebuggerControllerPatch * pMasterPatchCur = pTable->GetFirstPatch(&info);
+ pMasterPatchCur != NULL;
+ pMasterPatchCur = pTable->GetNextPatch(&info))
+ {
+ if (!pMasterPatchCur->IsILMasterPatch())
+ continue;
+
+ DebuggerMethodInfo *dmi = GetOrCreateMethodInfo(pMasterPatchCur->key.module, pMasterPatchCur->key.md);
+
+ // Found a relevant IL master patch. Now bind all corresponding slave patches
+ // that belong to this Module
+ DebuggerMethodInfo::DJIIterator it;
+ dmi->IterateAllDJIs(pAppDomain, pRuntimeModule, &it);
+ for (; !it.IsAtEnd(); it.Next())
+ {
+ DebuggerJitInfo *dji = it.Current();
+ _ASSERTE(dji->m_jitComplete);
+
+ if (dji->m_encVersion != pMasterPatchCur->GetEnCVersion())
+ continue;
+
+ // Do we already have a slave for this DJI & Controller? If so, no need
+ // to add another one
+ BOOL fSlaveExists = FALSE;
+ HASHFIND f;
+ for (DebuggerControllerPatch * pSlavePatchCur = pTable->GetFirstPatch(&f);
+ pSlavePatchCur != NULL;
+ pSlavePatchCur = pTable->GetNextPatch(&f))
+ {
+ if (pSlavePatchCur->IsILSlavePatch() &&
+ (pSlavePatchCur->GetDJI() == dji) &&
+ (pSlavePatchCur->controller == pMasterPatchCur->controller))
+ {
+ fSlaveExists = TRUE;
+ break;
+ }
+ }
+
+ if (fSlaveExists)
+ continue;
+
+ pMasterPatchCur->controller->AddBindAndActivateILSlavePatch(pMasterPatchCur, dji);
+ }
+ }
+ }
+}
+
+
+// Send the raw event for Updating symbols. Debugger must query for contents from out-of-process
+//
+// Arguments:
+// pRuntimeModule - required, module to send symbols for. May be domain neutral.
+// pAppDomain - required, appdomain that module is in.
+//
+// Notes:
+// This is just a ping event. Debugger must query for actual symbol contents.
+// This keeps the launch + attach cases identical.
+// This just sends the raw event and does not synchronize the runtime.
+// Use code:Debugger.SendUpdateModuleSymsEventAndBlock for that.
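+//
+// Rough usage contrast (a restatement of the two entry points in this file, not new API):
+//
+//     SendRawUpdateModuleSymsEvent(pModule, pAppDomain);       // ping only; no runtime synchronization
+//     SendUpdateModuleSymsEventAndBlock(pModule, pAppDomain);  // sends the event, then TrapAllRuntimeThreads()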
+void Debugger::SendRawUpdateModuleSymsEvent(Module *pRuntimeModule, AppDomain *pAppDomain)
+{
+// @telest - do we need an #ifdef FEATURE_FUSION here?
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+
+ PRECONDITION(ThreadHoldsLock());
+
+ // Debugger must have been attached to get us to this point.
+ // We hold the Debugger-lock, so debugger could not have detached from
+ // underneath us either.
+ PRECONDITION(CORDebuggerAttached());
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ // This event is used to trigger the ICorDebugManagedCallback::UpdateModuleSymbols
+ // callback. That callback is defined to pass a PDB stream, and so we still use this
+ // only for legacy compatibility reasons when we've actually got PDB symbols.
+ // New clients know they must request a new symbol reader after ClassLoad events.
+ if (pRuntimeModule->GetInMemorySymbolStreamFormat() != eSymbolFormatPDB)
+ return; // Non-PDB symbols
+
+ DebuggerModule* module = LookupOrCreateModule(pRuntimeModule, pAppDomain);
+ PREFIX_ASSUME(module != NULL);
+
+ DebuggerIPCEvent* ipce = NULL;
+ ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce, DB_IPCE_UPDATE_MODULE_SYMS,
+ g_pEEInterface->GetThread(),
+ pAppDomain);
+
+ ipce->UpdateModuleSymsData.vmDomainFile.SetRawPtr((module ? module->GetDomainFile() : NULL));
+
+ m_pRCThread->SendIPCEvent();
+}
+
+//
+// UpdateModuleSyms is called when the symbols for a module need to be
+// sent to the Right Side because they've changed.
+//
+// Arguments:
+// pRuntimeModule - required, module to send symbols for. May be domain neutral.
+// pAppDomain - required, appdomain that module is in.
+//
+//
+// Notes:
+// This will send the event (via code:Debugger.SendRawUpdateModuleSymsEvent) and then synchronize
+// the runtime waiting for a continue.
+//
+// This should only be called in cases where we reasonably expect to send symbols.
+// However, this may not send symbols if the symbols aren't available.
+void Debugger::SendUpdateModuleSymsEventAndBlock(Module* pRuntimeModule, AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (CORDBUnrecoverableError(this) || !CORDebuggerAttached())
+ {
+ return;
+ }
+
+ CGrowableStream * pStream = pRuntimeModule->GetInMemorySymbolStream();
+ LOG((LF_CORDB, LL_INFO10000, "D::UMS: update module syms RuntimeModule:0x%08x CGrowableStream:0x%08x\n", pRuntimeModule, pStream));
+ if (pStream == NULL)
+ {
+ // No in-memory Pdb available.
+ STRESS_LOG1(LF_CORDB, LL_INFO10000, "No syms available %p", pRuntimeModule);
+ return;
+ }
+
+ SENDIPCEVENT_BEGIN(this, g_pEEInterface->GetThread()); // toggles to preemptive
+
+ // Actually send the event
+ if (CORDebuggerAttached())
+ {
+ SendRawUpdateModuleSymsEvent(pRuntimeModule, pAppDomain);
+ TrapAllRuntimeThreads();
+ }
+
+ SENDIPCEVENT_END;
+}
+
+
+//
+// UnloadModule is called by the Runtime for each module (including shared ones)
+// in an AppDomain that is being unloaded, when a debugger is attached.
+// In the EE, a module may be domain-neutral and therefore shared across all AppDomains.
+// We abstract this detail away in the Debugger and consider each such EE module to correspond
+// to multiple "Debugger Module" instances (one per AppDomain).
+// Therefore, this doesn't necessarily mean the runtime is unloading the module, just
+// that the Debugger should consider its (per-AppDomain) DebuggerModule to be unloaded.
+//
+void Debugger::UnloadModule(Module* pRuntimeModule,
+ AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ // @@@@
+ // implements DebugInterface.
+ // Can only be called by the EE from Module::NotifyDebuggerUnload.
+ //
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+
+
+ LOG((LF_CORDB, LL_INFO100, "D::UM: unload module Mod:%#08x AD:%#08x runtimeMod:%#08x modName:%ls\n",
+ LookupOrCreateModule(pRuntimeModule, pAppDomain), pAppDomain, pRuntimeModule, pRuntimeModule->GetDebugName()));
+
+
+ Thread *thread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, thread);
+
+ if (CORDebuggerAttached())
+ {
+
+ DebuggerModule* module = LookupOrCreateModule(pRuntimeModule, pAppDomain);
+ if (module == NULL)
+ {
+ LOG((LF_CORDB, LL_INFO100, "D::UM: module already unloaded AD:%#08x runtimeMod:%#08x modName:%ls\n",
+ pAppDomain, pRuntimeModule, pRuntimeModule->GetDebugName()));
+ goto LExit;
+ }
+ _ASSERTE(module != NULL);
+
+ STRESS_LOG3(LF_CORDB, LL_INFO10000, "D::UM: Unloading Mod:%#08x, %#08x, %#08x\n",
+ pRuntimeModule, pAppDomain, pRuntimeModule->IsIStream());
+
+ // Note: the appdomain the module was loaded in must match the appdomain we're unloading it from. If it doesn't,
+ // then we've either found the wrong DebuggerModule in LookupModule or we were passed bad data.
+ _ASSERTE(module->GetAppDomain() == pAppDomain);
+
+ // Send the unload module event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce, DB_IPCE_UNLOAD_MODULE, thread, pAppDomain);
+ ipce->UnloadModuleData.vmDomainFile.SetRawPtr((module ? module->GetDomainFile() : NULL));
+ ipce->UnloadModuleData.debuggerAssemblyToken.Set(pRuntimeModule->GetClassLoader()->GetAssembly());
+ m_pRCThread->SendIPCEvent();
+
+ //
+ // Cleanup the module (only for resources consumed when a debugger is attached)
+ //
+
+ // Remove all patches that apply to this module/AppDomain combination
+ AppDomain* domainToRemovePatchesIn = NULL; // all domains by default
+ if( pRuntimeModule->GetAssembly()->IsDomainNeutral() )
+ {
+ // Deactivate all the patches specific to the AppDomain being unloaded
+ domainToRemovePatchesIn = pAppDomain;
+ }
+ // Note that we'll explicitly NOT delete DebuggerControllers, so that
+ // the Right Side can delete them later.
+ DebuggerController::RemovePatchesFromModule(pRuntimeModule, domainToRemovePatchesIn);
+
+ // Deactivate all JMC functions in this module. We don't do this for shared assemblies
+ // because JMC status is not maintained on a per-AppDomain basis and we don't
+ // want to change the JMC behavior of the module in other domains.
+ if( !pRuntimeModule->GetAssembly()->IsDomainNeutral() )
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "Setting all JMC methods to false:\n"));
+ DebuggerDataLockHolder debuggerDataLockHolder(this);
+ DebuggerMethodInfoTable * pTable = GetMethodInfoTable();
+ if (pTable != NULL)
+ {
+ HASHFIND info;
+
+ for (DebuggerMethodInfo *dmi = pTable->GetFirstMethodInfo(&info);
+ dmi != NULL;
+ dmi = pTable->GetNextMethodInfo(&info))
+ {
+ if (dmi->m_module == pRuntimeModule)
+ {
+ dmi->SetJMCStatus(false);
+ }
+ }
+ }
+ LOG((LF_CORDB, LL_EVERYTHING, "Done clearing JMC methods!\n"));
+ }
+
+ // Delete the Left Side representation of the module.
+ if (m_pModules != NULL)
+ {
+ DebuggerDataLockHolder chInfo(this);
+ m_pModules->RemoveModule(pRuntimeModule, pAppDomain);
+ }
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::UM: Skipping SendIPCEvent because RS detached."));
+ }
+
+LExit:
+ SENDIPCEVENT_END;
+}
+
+// Called when this module is completely gone from ALL AppDomains, regardless of
+// whether a debugger is attached.
+// Note that this doesn't get called until after the ADUnload is complete, which happens
+// asyncronously in Whidbey (and won't happen at all if the process shuts down first).
+// This is normally not called only domain-neutral assemblies because they can't be unloaded.
+// However, it may be called if the loader fails to completely load a domain-neutral assembly.
+void Debugger::DestructModule(Module *pModule)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO100, "D::DM: destruct module runtimeMod:%#08x modName:%ls\n",
+ pModule, pModule->GetDebugName()));
+
+ // @@@
+ // Implements DebugInterface.
+ // It is called for Module::Destruct. We do not need to send any IPC event.
+
+ DebuggerLockHolder dbgLockHolder(this);
+
+ // We should have removed all patches at AD unload time (or detach time if the
+ // debugger detached).
+ _ASSERTE( !DebuggerController::ModuleHasPatches(pModule) );
+
+ // Do module clean-up that applies even when no debugger is attached.
+ // Ideally, we might like to do this cleanup more eagerly and deterministically,
+ // but we don't currently get any early AD unload callback from the loader
+ // when no debugger is attached. Perhaps we should make the loader
+ // call this callback earlier.
+ RemoveModuleReferences(pModule);
+}
+
+
+// Internal helper to remove all the DJIs / DMIs and other references for a given Module.
+// If we don't remove the DJIs / DMIs, then we're subject to recycling bugs because the underlying
+// MethodDescs will get removed. Thus we'll look up a new MD and it will pull up an old DMI that matched
+// the old MD. Now the DMI and MD are out of sync and it's downhill from there.
+// Note that DMIs may be used (and need cleanup) even when no debugger is attached.
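+//
+// A sketch of the recycling hazard this guards against (illustrative sequence only):
+//
+//     1. Module M1 is unloaded, but its DebuggerMethodInfo entries are left behind.
+//     2. Module M2 is loaded at the same address and a MethodDesc from M2 matches a
+//        stale entry that was created for M1.
+//     3. The stale DMI now describes the wrong method, and the DMI/MD pair is out of sync.
+//
+// ClearMethodsOfModule (called below) removes the per-module entries to prevent this.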
+void Debugger::RemoveModuleReferences( Module* pModule )
+{
+ _ASSERTE( ThreadHoldsLock() );
+
+ // We want to remove all references to the module from the various
+ // tables. It's not just possible, but probable, that the module
+ // will be re-loaded at the exact same address, and in that case,
+ // we'll have piles of entries in our DJI table that mistakenly
+ // match this new module.
+ // Note that this doesn't apply to domain neutral assemblies, that only
+ // get unloaded when the process dies. We won't be reclaiming their
+ // DJIs/patches b/c the process is going to die, so we'll reclaim
+ // the memory when the various hashtables are unloaded.
+
+ if (m_pMethodInfos != NULL)
+ {
+ HRESULT hr = S_OK;
+ if (!HasLazyData())
+ {
+ hr = LazyInitWrapper();
+ }
+
+ if (SUCCEEDED(hr))
+ {
+ DebuggerDataLockHolder debuggerDataLockHolder(this);
+
+ m_pMethodInfos->ClearMethodsOfModule(pModule);
+
+ // DebuggerDataLockHolder out of scope - release implied
+ }
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// SendClassLoadUnloadEvent - notify the RS of a class either loading or unloading.
+//
+// Arguments:
+//
+// fAttaching - true if a debugger is in the process of attaching
+//
+// Return Value:
+// None
+//
+//---------------------------------------------------------------------------------------
+void Debugger::SendClassLoadUnloadEvent (mdTypeDef classMetadataToken,
+ DebuggerModule * pClassDebuggerModule,
+ Assembly *pAssembly,
+ AppDomain *pAppDomain,
+ BOOL fIsLoadEvent)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+
+ LOG((LF_CORDB,LL_INFO10000, "D::SCLUE: Tok:0x%x isLoad:0x%x Mod:%#08x AD:%#08x\n",
+ classMetadataToken, fIsLoadEvent, pClassDebuggerModule, pAppDomain));
+
+ DebuggerIPCEvent * pEvent = m_pRCThread->GetIPCEventSendBuffer();
+
+ BOOL fIsReflection = pClassDebuggerModule->GetRuntimeModule()->IsReflection();
+
+ if (fIsLoadEvent == TRUE)
+ {
+ // We need to update Metadata before Symbols (since symbols depend on metadata)
+ // It's debatable which needs to come first: Class Load or Sym update.
+ // V1.1 sent Sym Update first so that binding at the class load has the latest symbols.
+ // However, The Class Load may need to be in sync with updating new metadata,
+ // and that has to come before the Sym update.
+ InitIPCEvent(pEvent, DB_IPCE_LOAD_CLASS, g_pEEInterface->GetThread(), pAppDomain);
+
+ pEvent->LoadClass.classMetadataToken = classMetadataToken;
+ pEvent->LoadClass.vmDomainFile.SetRawPtr((pClassDebuggerModule ? pClassDebuggerModule->GetDomainFile() : NULL));
+ pEvent->LoadClass.classDebuggerAssemblyToken.Set(pAssembly);
+
+
+ // For class loads in dynamic modules, RS knows that the metadata has now grown and is invalid.
+ // RS will re-fetch new metadata from out-of-process.
+ }
+ else
+ {
+ InitIPCEvent(pEvent, DB_IPCE_UNLOAD_CLASS, g_pEEInterface->GetThread(), pAppDomain);
+
+ pEvent->UnloadClass.classMetadataToken = classMetadataToken;
+ pEvent->UnloadClass.vmDomainFile.SetRawPtr((pClassDebuggerModule ? pClassDebuggerModule->GetDomainFile() : NULL));
+ pEvent->UnloadClass.classDebuggerAssemblyToken.Set(pAssembly);
+ }
+
+ m_pRCThread->SendIPCEvent();
+
+ if (fIsLoadEvent && fIsReflection)
+ {
+ // Send the raw event, but don't actually sync and block the runtime.
+ SendRawUpdateModuleSymsEvent(pClassDebuggerModule->GetRuntimeModule(), pAppDomain);
+ }
+
+}
+
+
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+BOOL Debugger::SendSystemClassLoadUnloadEvent(mdTypeDef classMetadataToken,
+ Module *classModule,
+ BOOL fIsLoadEvent)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ if (!m_dClassLoadCallbackCount)
+ {
+ return FALSE;
+ }
+
+ BOOL fRetVal = FALSE;
+
+ Assembly *pAssembly = classModule->GetAssembly();
+
+ if (!m_pAppDomainCB->Lock())
+ return (FALSE);
+
+ AppDomainInfo *pADInfo = m_pAppDomainCB->FindFirst();
+
+ while (pADInfo != NULL)
+ {
+ AppDomain *pAppDomain = pADInfo->m_pAppDomain;
+ _ASSERTE(pAppDomain != NULL);
+
+ // Only notify for app domains where the module has been fully loaded already
+ // We used to make a different check here domain->ContainsAssembly() but that
+ // We used to make a different check here, domain->ContainsAssembly(), but that
+ // non-NULL until the module is fully loaded into the domain which is what we
+ // want.
+ if ((classModule->FindDomainFile(pAppDomain) != NULL ) &&
+ !(fIsLoadEvent && pAppDomain->IsUnloading()) )
+ {
+ // Find the Left Side module that this class belongs in.
+ DebuggerModule* pModule = LookupOrCreateModule(classModule, pAppDomain);
+ _ASSERTE(pModule != NULL);
+
+ // Only send a class load event if they're enabled for this module.
+ if (pModule && pModule->ClassLoadCallbacksEnabled())
+ {
+ SendClassLoadUnloadEvent(classMetadataToken,
+ pModule,
+ pAssembly,
+ pAppDomain,
+ fIsLoadEvent);
+ fRetVal = TRUE;
+ }
+ }
+
+ pADInfo = m_pAppDomainCB->FindNext(pADInfo);
+ }
+
+ m_pAppDomainCB->Unlock();
+
+ return fRetVal;
+}
+
+
+//
+// LoadClass is called when a Runtime thread loads a new Class.
+// Returns TRUE if an event is sent, FALSE otherwise
+BOOL Debugger::LoadClass(TypeHandle th,
+ mdTypeDef classMetadataToken,
+ Module *classModule,
+ AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // Implements DebugInterface
+ // This can be called by EE/Loader when class is loaded.
+ //
+
+ BOOL fRetVal = FALSE;
+
+ if (CORDBUnrecoverableError(this))
+ return FALSE;
+
+ // Note that pAppDomain may be null. The AppDomain isn't used here, and doesn't make a lot of sense since
+ // we may be delivering the notification for a class in an assembly which is loaded into multiple AppDomains. We
+ // handle this in SendSystemClassLoadUnloadEvent below by looping through all AppDomains and dispatching
+ // events for each that contain this assembly.
+
+ LOG((LF_CORDB, LL_INFO10000, "D::LC: load class Tok:%#08x Mod:%#08x AD:%#08x classMod:%#08x modName:%ls\n",
+ classMetadataToken, (pAppDomain == NULL) ? NULL : LookupOrCreateModule(classModule, pAppDomain),
+ pAppDomain, classModule, classModule->GetDebugName()));
+
+ //
+ // If we're attaching, then we only need to send the event. We
+ // don't need to disable event handling or lock the debugger
+ // object.
+ //
+ SENDIPCEVENT_BEGIN(this, g_pEEInterface->GetThread());
+
+ if (CORDebuggerAttached())
+ {
+ fRetVal = SendSystemClassLoadUnloadEvent(classMetadataToken, classModule, TRUE);
+
+ if (fRetVal == TRUE)
+ {
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::LC: Skipping SendIPCEvent because RS detached."));
+ }
+
+ SENDIPCEVENT_END;
+
+ return fRetVal;
+}
+
+
+//
+// UnloadClass is called when a Runtime thread unloads a Class.
+//
+void Debugger::UnloadClass(mdTypeDef classMetadataToken,
+ Module *classModule,
+ AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ // @@@
+ // Implements DebugInterface
+ // Can only be called from EE
+
+ if (CORDBUnrecoverableError(this))
+ {
+ return;
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "D::UC: unload class Tok:0x%08x Mod:%#08x AD:%#08x runtimeMod:%#08x modName:%ls\n",
+ classMetadataToken, LookupOrCreateModule(classModule, pAppDomain), pAppDomain, classModule, classModule->GetDebugName()));
+
+ Assembly *pAssembly = classModule->GetClassLoader()->GetAssembly();
+ DebuggerModule *pModule = LookupOrCreateModule(classModule, pAppDomain);
+
+ if ((pModule == NULL) || !pModule->ClassLoadCallbacksEnabled())
+ {
+ return;
+ }
+
+ SENDIPCEVENT_BEGIN(this, g_pEEInterface->GetThread());
+
+ if (CORDebuggerAttached())
+ {
+ _ASSERTE((pAppDomain != NULL) && (pAssembly != NULL) && (pModule != NULL));
+
+ SendClassLoadUnloadEvent(classMetadataToken, pModule, pAssembly, pAppDomain, FALSE);
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::UC: Skipping SendIPCEvent because RS detached."));
+ }
+
+ // Let other Runtime threads handle their events.
+ SENDIPCEVENT_END;
+
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+void Debugger::FuncEvalComplete(Thread* pThread, DebuggerEval *pDE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::FEC: func eval complete pDE:%08x evalType:%d %s %s\n",
+ pDE, pDE->m_evalType, pDE->m_successful ? "Success" : "Fail", pDE->m_aborted ? "Abort" : "Completed"));
+
+
+ _ASSERTE(pDE->m_completed);
+ _ASSERTE((g_pEEInterface->GetThread() && !g_pEEInterface->GetThread()->m_fPreemptiveGCDisabled) || g_fInControlC);
+ _ASSERTE(ThreadHoldsLock());
+
+ // If we need to rethrow a ThreadAbortException then set the thread's state so we remember that.
+ if (pDE->m_rethrowAbortException)
+ {
+ pThread->SetThreadStateNC(Thread::TSNC_DebuggerReAbort);
+ }
+
+
+ //
+ // Get the domain that the result is valid in. The RS will cache this in the ICorDebugValue
+ // Note: it's possible that the AppDomain has (or is about to be) unloaded, which could lead to a
+ // crash when we use the DebuggerModule. Ideally we'd only be using AppDomain IDs here.
+ // We can't easily convert our ADID to an AppDomain* (SystemDomain::GetAppDomainFromId)
+    // because we can't prove that the AppDomain* would be valid (not unloaded).
+ //
+ AppDomain *pDomain = pThread->GetDomain();
+ AppDomain *pResultDomain = ((pDE->m_debuggerModule == NULL) ? pDomain : pDE->m_debuggerModule->GetAppDomain());
+ _ASSERTE( pResultDomain->GetId() == pDE->m_appDomainId );
+
+ // Send a func eval complete event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce, DB_IPCE_FUNC_EVAL_COMPLETE, pThread, pDomain);
+
+ ipce->FuncEvalComplete.funcEvalKey = pDE->m_funcEvalKey;
+ ipce->FuncEvalComplete.successful = pDE->m_successful;
+ ipce->FuncEvalComplete.aborted = pDE->m_aborted;
+ ipce->FuncEvalComplete.resultAddr = &(pDE->m_result);
+ ipce->FuncEvalComplete.vmAppDomain.SetRawPtr(pResultDomain);
+ ipce->FuncEvalComplete.vmObjectHandle = pDE->m_vmObjectHandle;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::FEC: TypeHandle is :%08x\n", pDE->m_resultType.AsPtr()));
+
+ Debugger::TypeHandleToExpandedTypeInfo(pDE->m_retValueBoxing, // whether return values get boxed or not depends on the particular FuncEval we're doing...
+ pResultDomain,
+ pDE->m_resultType,
+ &ipce->FuncEvalComplete.resultType);
+
+ _ASSERTE(ipce->FuncEvalComplete.resultType.elementType != ELEMENT_TYPE_VALUETYPE);
+
+ LOG((LF_CORDB, LL_INFO10000, "D::FEC: returned from call\n"));
+
+ // We must adjust the result address to point to the right place
+ ipce->FuncEvalComplete.resultAddr = ArgSlotEndianessFixup((ARG_SLOT*)ipce->FuncEvalComplete.resultAddr,
+ GetSizeForCorElementType(ipce->FuncEvalComplete.resultType.elementType));
+
+ m_pRCThread->SendIPCEvent();
+
+#endif
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+bool Debugger::ResumeThreads(AppDomain* pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+ // Okay, mark that we're not stopped anymore and let the
+ // Runtime threads go...
+ ReleaseAllRuntimeThreads(pAppDomain);
+
+ // Return that we've continued the process.
+ return true;
+}
+
+
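+// Small helper used when reading a method's native code: if the method was split
+// into hot and cold regions, the two regions are copied into a single contiguous
+// buffer owned by this object; otherwise the hot-code address is returned as-is.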
+class CodeBuffer
+{
+public:
+
+ BYTE *getCodeBuffer(DebuggerJitInfo *dji)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ CodeRegionInfo codeRegionInfo = CodeRegionInfo::GetCodeRegionInfo(dji);
+
+ if (codeRegionInfo.getAddrOfColdCode())
+ {
+ _ASSERTE(codeRegionInfo.getSizeOfHotCode() != 0);
+ _ASSERTE(codeRegionInfo.getSizeOfColdCode() != 0);
+ S_SIZE_T totalSize = S_SIZE_T( codeRegionInfo.getSizeOfHotCode() ) +
+ S_SIZE_T( codeRegionInfo.getSizeOfColdCode() );
+ if ( totalSize.IsOverflow() )
+ {
+ _ASSERTE(0 && "Buffer overflow error in getCodeBuffer");
+ return NULL;
+ }
+
+ BYTE *code = (BYTE *) buffer.AllocNoThrow( totalSize.Value() );
+ if (code)
+ {
+ memcpy(code,
+ (void *) codeRegionInfo.getAddrOfHotCode(),
+ codeRegionInfo.getSizeOfHotCode());
+
+ memcpy(code + codeRegionInfo.getSizeOfHotCode(),
+ (void *) codeRegionInfo.getAddrOfColdCode(),
+ codeRegionInfo.getSizeOfColdCode());
+
+ // Now patch the control transfer instructions
+ }
+
+ return code;
+ }
+ else
+ {
+ return dac_cast<PTR_BYTE>(codeRegionInfo.getAddrOfHotCode());
+ }
+ }
+private:
+
+ CQuickBytes buffer;
+};
+
+
+//---------------------------------------------------------------------------------------
+//
+// Called on the helper thread to serialize metadata so it can be read out-of-process.
+//
+// Arguments:
+// pModule - module that needs metadata serialization
+// countBytes - out value, holds the number of bytes which were allocated in the
+// serialized buffer
+//
+// Return Value:
+//    A pointer to a serialized buffer of metadata. The caller should free this buffer using
+// DeleteInteropSafe
+//
+// Assumptions:
+// This is called on the helper-thread, or a thread pretending to be the helper-thread.
+// For any synchronous message, the debuggee should be synchronized. The only async
+// messages are Attach and Async-Break.
+//
+//
+//---------------------------------------------------------------------------------------
+BYTE* Debugger::SerializeModuleMetaData(Module * pModule, DWORD * countBytes)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "Debugger::SMMD called\n"));
+
+ // Do not release the emitter. This is a weak reference.
+ IMetaDataEmit *pEmitter = pModule->GetEmitter();
+ _ASSERTE(pEmitter != NULL);
+
+ HRESULT hr;
+ BYTE* metadataBuffer = NULL;
+ ReleaseHolder<IMDInternalEmit> pInternalEmitter;
+ ULONG originalUpdateMode;
+ hr = pEmitter->QueryInterface(IID_IMDInternalEmit, (void **)&pInternalEmitter);
+ if(FAILED(hr))
+ {
+ LOG((LF_CORDB, LL_INFO10, "Debugger::SMMD pEmitter doesn't support IID_IMDInternalEmit hr=0x%x\n", hr));
+ ThrowHR(hr);
+ }
+ _ASSERTE(pInternalEmitter != NULL);
+
+ hr = pInternalEmitter->SetMDUpdateMode(MDUpdateExtension, &originalUpdateMode);
+ if(FAILED(hr))
+ {
+ LOG((LF_CORDB, LL_INFO10, "Debugger::SMMD SetMDUpdateMode failed hr=0x%x\n", hr));
+ ThrowHR(hr);
+ }
+ _ASSERTE(originalUpdateMode == MDUpdateFull);
+
+ hr = pEmitter->GetSaveSize(cssQuick, countBytes);
+ if(FAILED(hr))
+ {
+ LOG((LF_CORDB, LL_INFO10, "Debugger::SMMD GetSaveSize failed hr=0x%x\n", hr));
+ pInternalEmitter->SetMDUpdateMode(originalUpdateMode, NULL);
+ ThrowHR(hr);
+ }
+
+ EX_TRY
+ {
+ metadataBuffer = new (interopsafe) BYTE[*countBytes];
+ }
+ EX_CATCH
+ {
+ LOG((LF_CORDB, LL_INFO10, "Debugger::SMMD Allocation failed\n"));
+ pInternalEmitter->SetMDUpdateMode(originalUpdateMode, NULL);
+ EX_RETHROW;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ _ASSERTE(metadataBuffer != NULL); // allocation would throw first
+
+ // Caller ensures serialization that guarantees that the metadata doesn't grow underneath us.
+ hr = pEmitter->SaveToMemory(metadataBuffer, *countBytes);
+ if(FAILED(hr))
+ {
+ LOG((LF_CORDB, LL_INFO10, "Debugger::SMMD SaveToMemory failed hr=0x%x\n", hr));
+ DeleteInteropSafe(metadataBuffer);
+ pInternalEmitter->SetMDUpdateMode(originalUpdateMode, NULL);
+ ThrowHR(hr);
+ }
+
+ pInternalEmitter->SetMDUpdateMode(originalUpdateMode, NULL);
+ LOG((LF_CORDB, LL_INFO10000, "Debugger::SMMD exiting\n"));
+ return metadataBuffer;
+}
+
+#ifdef FEATURE_LEGACYNETCF_DBG_HOST_CONTROL
+//---------------------------------------------------------------------------------------
+//
+// Called on the helper thread to send a pause notification to the host
+//
+//
+// This is called on the helper-thread, or a thread pretending to be the helper-thread.
+// The debuggee should be synchronized. This callback to the host is only supported
+// on Windows Phone as a replacement for some legacy NetCF behavior. In general I don't
+// like being the transport between the VS debugger and the host, so don't use
+// this as precedent that we should start making more callbacks for them. In the future
+// the debugger and host should make alternative arrangements such as window messages,
+// out of proc event signaling, or any other IPC mechanism.
+//
+// This should be deprecated as soon as mixed-mode debugging is available. The
+// end goal on phone is to pause the UI thread while VS is in the break state. That
+// will be accomplished by a mixed-mode debugger suspending all native threads when
+// it breaks rather than having us send a special message.
+//
+//---------------------------------------------------------------------------------------
+VOID Debugger::InvokeLegacyNetCFHostPauseCallback()
+{
+ IHostNetCFDebugControlManager* pHostCallback = CorHost2::GetHostNetCFDebugControlManager();
+ if(pHostCallback != NULL)
+ {
+ pHostCallback->NotifyPause(0);
+ }
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Called on the helper thread to send a resume notification to the host
+//
+//
+// This is called on the helper-thread, or a thread pretending to be the helper-thread.
+// The debuggee should be synchronized. This callback to the host is only supported
+// on Windows Phone as a replacement for some legacy NetCF behavior. In general I don't
+// like being the transport between the VS debugger and the host, so don't use
+// this as precedent that we should start making more callbacks for them. In the future
+// the debugger and host should make alternative arrangements such as window messages,
+// out of proc event signaling, or any other IPC mechanism.
+//
+// This should be deprecated as soon as mixed-mode debugging is available. The
+// end goal on phone is to pause the UI thread while VS is in the break state. That
+// will be accomplished by a mixed-mode debugger suspending all native threads when
+// it breaks rather than having us send a special message.
+//
+//---------------------------------------------------------------------------------------
+VOID Debugger::InvokeLegacyNetCFHostResumeCallback()
+{
+ IHostNetCFDebugControlManager* pHostCallback = CorHost2::GetHostNetCFDebugControlManager();
+ if(pHostCallback != NULL)
+ {
+ pHostCallback->NotifyResume(0);
+ }
+}
+#endif //FEATURE_LEGACYNETCF_DBG_HOST_CONTROL
+
+//---------------------------------------------------------------------------------------
+//
+// Handle an IPC event from the Debugger.
+//
+// Arguments:
+// event - IPC event to handle.
+//
+// Return Value:
+// True if the event was a continue. Else false.
+//
+// Assumptions:
+// This is called on the helper-thread, or a thread pretending to be the helper-thread.
+// For any synchronous message, the debuggee should be synchronized. The only async
+// messages are Attach and Async-Break.
+//
+// Notes:
+// HandleIPCEvent is called by the RC thread in response to an event
+// from the Debugger Interface. No other IPC events, nor any Runtime
+// events will come in until this method returns. Returns true if this
+// was a Continue event.
+//
+//    If this function is called on the native debugger helper thread, we will
+//    handle everything. However, if this is called on a managed thread doing
+//    helper thread duty, we will fail the operation since we are mainly
+//    waiting for a CONTINUE message from the RS.
+//
+//
+//---------------------------------------------------------------------------------------
+
+#ifdef _PREFAST_
+#pragma warning(push)
+#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
+#endif
+bool Debugger::HandleIPCEvent(DebuggerIPCEvent * pEvent)
+{
+ CONTRACTL
+ {
+ THROWS;
+ if (g_pEEInterface->GetThread() != NULL) { GC_TRIGGERS; } else { GC_NOTRIGGER; }
+
+ PRECONDITION(ThisIsHelperThreadWorker());
+
+ if (m_stopped)
+ {
+ MODE_COOPERATIVE;
+ }
+ else
+ {
+ MODE_ANY;
+ }
+ }
+ CONTRACTL_END;
+
+ // If we're the temporary helper thread, then we may reject certain operations.
+ bool temporaryHelp = ThisIsTempHelperThread();
+
+
+#ifdef _DEBUG
+ // This reg key allows us to test our unhandled event filter installed in HandleIPCEventWrapper
+ // to make sure it works properly.
+ static int s_fDbgFaultInHandleIPCEvent = -1;
+ if (s_fDbgFaultInHandleIPCEvent == -1)
+ {
+ s_fDbgFaultInHandleIPCEvent = UnsafeGetConfigDWORD(CLRConfig::INTERNAL_DbgFaultInHandleIPCEvent);
+ }
+
+ // If we need to fault, let's generate an access violation.
+ if (s_fDbgFaultInHandleIPCEvent)
+ {
+ *((BYTE *)0) = 0;
+ }
+#endif
+
+ BOOL fSuccess;
+ bool fContinue = false;
+ HRESULT hr = S_OK;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::HIPCE: got %s\n", IPCENames::GetName(pEvent->type)));
+ DbgLog((DebuggerIPCEventType)(pEvent->type & DB_IPCE_TYPE_MASK));
+
+    // Even though the runtime is considered stopped, that only means managed threads will not
+    // execute any more managed code. Those threads may still be running unmanaged code, so it
+    // is not true that we can skip holding the lock while processing a synchronized event.
+    //
+    // Worst of all is the special case where a user breakpoint or exception can
+    // be sent as part of attach if the debugger was launched by a managed app.
+ //
+ DebuggerLockHolder dbgLockHolder(this, FALSE);
+
+ if ((pEvent->type & DB_IPCE_TYPE_MASK) == DB_IPCE_ASYNC_BREAK ||
+ (pEvent->type & DB_IPCE_TYPE_MASK) == DB_IPCE_ATTACHING)
+ {
+ dbgLockHolder.Acquire();
+ }
+ else
+ {
+ _ASSERTE(m_stopped);
+ _ASSERTE(ThreadHoldsLock());
+ }
+
+
+ switch (pEvent->type & DB_IPCE_TYPE_MASK)
+ {
+
+ case DB_IPCE_ATTACHING:
+ // In V3, Attach is atomic, meaning that there isn't a complex handshake back and forth between LS + RS.
+        // The RS sends a single attaching event and attaches at the first response from the Left Side.
+ StartCanaryThread();
+
+        // In V3, after the attaching event was handled we iterated through all ADs and made shadow copies of PDBs in the BIN directories.
+        // Once AppDomain, DomainAssembly and module iteration became available in the out-of-process model in V4, the code that enables
+        // PDBs to be copied was no longer called at attach time.
+        // Eliminating the PDB-copying side effect is tracked by: Dev10 #927143
+ EX_TRY
+ {
+ IterateAppDomainsForPdbs();
+ }
+ EX_CATCH_HRESULT(hr); // ignore failures
+
+ if (m_jitAttachInProgress)
+ {
+ // For jit-attach, mark that we're attached now.
+ // This lets callers to code:Debugger.JitAttach check the flag and
+ // send the jit-attach event just like a normal event.
+ MarkDebuggerAttachedInternal();
+
+ // set the managed attach event so that waiting threads can continue
+ VERIFY(SetEvent(GetAttachEvent()));
+ break;
+ }
+
+ VERIFY(SetEvent(GetAttachEvent()));
+
+ //
+ // For regular (non-jit) attach, fall through to do an async break.
+ //
+
+ case DB_IPCE_ASYNC_BREAK:
+ {
+ if (temporaryHelp)
+ {
+ // Don't support async break on temporary helper thread.
+                // Well, this function does not return an HR, so this means that the
+                // ASYNC_BREAK event will be swallowed silently while we are
+                // doing helper thread duty!
+ //
+ hr = CORDBG_E_NOTREADY;
+ }
+ else
+ {
+                // Not synchronized; we acquired the debugger lock on function entry.
+ _ASSERTE(ThreadHoldsLock());
+
+ // Simply trap all Runtime threads if we're not already trying to.
+ if (!m_trappingRuntimeThreads)
+ {
+ // If the RS sent an Async-break, then that's an explicit request.
+ m_RSRequestedSync = TRUE;
+ TrapAllRuntimeThreads(); // Non-blocking...
+ }
+ }
+ break;
+ }
+
+ case DB_IPCE_CONTINUE:
+ {
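+            // Clear the canary's cached result so its safety check is re-run the next time we stop.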
+ GetCanary()->ClearCache();
+
+ fContinue = ResumeThreads(pEvent->vmAppDomain.GetRawPtr());
+
+ //
+ // Go ahead and release the TSL now that we're continuing. This ensures that we've held
+ // the thread store lock the entire time the Runtime was just stopped.
+ //
+ ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_FOR_DEBUGGER);
+
+ break;
+ }
+
+ case DB_IPCE_BREAKPOINT_ADD:
+ {
+
+ //
+ // Currently, we can't create a breakpoint before a
+ // function desc is available.
+ // Also, we can't know if a breakpoint is ok
+ // prior to the method being JITted.
+ //
+
+ _ASSERTE(hr == S_OK);
+ DebuggerBreakpoint * pDebuggerBP = NULL;
+
+ DebuggerModule * pDebuggerModule = LookupOrCreateModule(pEvent->BreakpointData.vmDomainFile);
+ Module * pModule = pDebuggerModule->GetRuntimeModule();
+ DebuggerMethodInfo * pDMI = GetOrCreateMethodInfo(pModule, pEvent->BreakpointData.funcMetadataToken);
+ MethodDesc * pMethodDesc = pEvent->BreakpointData.nativeCodeMethodDescToken.UnWrap();
+
+ DebuggerJitInfo * pDJI = NULL;
+ if ((pMethodDesc != NULL) && (pDMI != NULL))
+ {
+ pDJI = pDMI->FindOrCreateInitAndAddJitInfo(pMethodDesc);
+ }
+
+ {
+ // If we haven't been either JITted or EnC'd yet, then
+ // we'll put a patch in by offset, implicitly relative
+ // to the first version of the code.
+
+ pDebuggerBP = new (interopsafe, nothrow) DebuggerBreakpoint(pModule,
+ pEvent->BreakpointData.funcMetadataToken,
+ pEvent->vmAppDomain.GetRawPtr(),
+ pEvent->BreakpointData.offset,
+ !pEvent->BreakpointData.isIL,
+ pEvent->BreakpointData.encVersion,
+ pMethodDesc,
+ pDJI,
+ &fSuccess);
+
+ TRACE_ALLOC(pDebuggerBP);
+
+ if ((pDebuggerBP != NULL) && !fSuccess)
+ {
+ DeleteInteropSafe(pDebuggerBP);
+ pDebuggerBP = NULL;
+ hr = CORDBG_E_UNABLE_TO_SET_BREAKPOINT;
+ }
+ }
+
+ if ((pDebuggerBP == NULL) && !FAILED(hr))
+ {
+ hr = E_OUTOFMEMORY;
+ }
+
+ LOG((LF_CORDB,LL_INFO10000,"\tBP Add: BPTOK:"
+ "0x%x, tok=0x%08x, offset=0x%x, isIL=%d dm=0x%x m=0x%x\n",
+ pDebuggerBP,
+ pEvent->BreakpointData.funcMetadataToken,
+ pEvent->BreakpointData.offset,
+ pEvent->BreakpointData.isIL,
+ pDebuggerModule,
+ pModule));
+
+ //
+ // We're using a two-way event here, so we place the
+ // result event into the _receive_ buffer, not the send
+ // buffer.
+ //
+
+ DebuggerIPCEvent * pIPCResult = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCEvent(pIPCResult,
+ DB_IPCE_BREAKPOINT_ADD_RESULT,
+ g_pEEInterface->GetThread(),
+ pEvent->vmAppDomain);
+
+ pIPCResult->BreakpointData.breakpointToken.Set(pDebuggerBP);
+ pIPCResult->hr = hr;
+
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_STEP:
+ {
+ LOG((LF_CORDB,LL_INFO10000, "D::HIPCE: stepIn:0x%x frmTok:0x%x"
+ "StepIn:0x%x RangeIL:0x%x RangeCount:0x%x MapStop:0x%x "
+ "InterceptStop:0x%x AppD:0x%x\n",
+ pEvent->StepData.stepIn,
+ pEvent->StepData.frameToken.GetSPValue(),
+ pEvent->StepData.stepIn,
+ pEvent->StepData.rangeIL,
+ pEvent->StepData.rangeCount,
+ pEvent->StepData.rgfMappingStop,
+ pEvent->StepData.rgfInterceptStop,
+ pEvent->vmAppDomain.GetRawPtr()));
+
+ // <TODO>@todo memory allocation - bad if we're synced</TODO>
+ Thread * pThread = pEvent->StepData.vmThreadToken.GetRawPtr();
+ AppDomain * pAppDomain = pEvent->vmAppDomain.GetRawPtr();
+
+ DebuggerIPCEvent * pIPCResult = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCEvent(pIPCResult,
+ DB_IPCE_STEP_RESULT,
+ pThread,
+ pEvent->vmAppDomain);
+
+ if (temporaryHelp)
+ {
+ // Can't step on the temporary helper thread.
+ pIPCResult->hr = CORDBG_E_NOTREADY;
+ }
+ else
+ {
+ DebuggerStepper * pStepper;
+
+ if (pEvent->StepData.IsJMCStop)
+ {
+ pStepper = new (interopsafe, nothrow) DebuggerJMCStepper(pThread,
+ pEvent->StepData.rgfMappingStop,
+ pEvent->StepData.rgfInterceptStop,
+ pAppDomain);
+ }
+ else
+ {
+ pStepper = new (interopsafe, nothrow) DebuggerStepper(pThread,
+ pEvent->StepData.rgfMappingStop,
+ pEvent->StepData.rgfInterceptStop,
+ pAppDomain);
+ }
+
+ if (pStepper == NULL)
+ {
+ pIPCResult->hr = E_OUTOFMEMORY;
+
+ m_pRCThread->SendIPCReply();
+
+ break;
+ }
+ TRACE_ALLOC(pStepper);
+
+ unsigned int cRanges = pEvent->StepData.totalRangeCount;
+
+ _ASSERTE(cRanges == 0 || ((cRanges > 0) && (cRanges == pEvent->StepData.rangeCount)));
+
+ if (!pStepper->Step(pEvent->StepData.frameToken,
+ pEvent->StepData.stepIn,
+ &(pEvent->StepData.range),
+ cRanges,
+ ((cRanges > 0) ? pEvent->StepData.rangeIL : false)))
+ {
+ pIPCResult->hr = E_OUTOFMEMORY;
+
+ m_pRCThread->SendIPCReply();
+
+ DeleteInteropSafe(pStepper);
+ break;
+ }
+
+ pIPCResult->StepData.stepperToken.Set(pStepper);
+
+
+ } // end normal step case.
+
+
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_STEP_OUT:
+ {
+ // <TODO>@todo memory allocation - bad if we're synced</TODO>
+ Thread * pThread = pEvent->StepData.vmThreadToken.GetRawPtr();
+ AppDomain * pAppDomain = pEvent->vmAppDomain.GetRawPtr();
+
+ DebuggerIPCEvent * pIPCResult = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCEvent(pIPCResult,
+ DB_IPCE_STEP_RESULT,
+ pThread,
+ pAppDomain);
+
+ if (temporaryHelp)
+ {
+ // Can't step on the temporary helper thread.
+ pIPCResult->hr = CORDBG_E_NOTREADY;
+ }
+ else
+ {
+ DebuggerStepper * pStepper;
+
+ if (pEvent->StepData.IsJMCStop)
+ {
+ pStepper = new (interopsafe, nothrow) DebuggerJMCStepper(pThread,
+ pEvent->StepData.rgfMappingStop,
+ pEvent->StepData.rgfInterceptStop,
+ pAppDomain);
+ }
+ else
+ {
+ pStepper = new (interopsafe, nothrow) DebuggerStepper(pThread,
+ pEvent->StepData.rgfMappingStop,
+ pEvent->StepData.rgfInterceptStop,
+ pAppDomain);
+ }
+
+
+ if (pStepper == NULL)
+ {
+ pIPCResult->hr = E_OUTOFMEMORY;
+ m_pRCThread->SendIPCReply();
+
+ break;
+ }
+
+ TRACE_ALLOC(pStepper);
+
+ // Safe to stack trace b/c we're stopped.
+ StackTraceTicket ticket(pThread);
+
+ pStepper->StepOut(pEvent->StepData.frameToken, ticket);
+
+ pIPCResult->StepData.stepperToken.Set(pStepper);
+ }
+
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_BREAKPOINT_REMOVE:
+ {
+ // <TODO>@todo memory allocation - bad if we're synced</TODO>
+
+ DebuggerBreakpoint * pDebuggerBP = pEvent->BreakpointData.breakpointToken.UnWrap();
+
+ pDebuggerBP->Delete();
+ }
+ break;
+
+ case DB_IPCE_STEP_CANCEL:
+ {
+ // <TODO>@todo memory allocation - bad if we're synced</TODO>
+ LOG((LF_CORDB,LL_INFO10000, "D:HIPCE:Got STEP_CANCEL for stepper 0x%p\n",
+ pEvent->StepData.stepperToken.UnWrap()));
+
+ DebuggerStepper * pStepper = pEvent->StepData.stepperToken.UnWrap();
+
+ pStepper->Delete();
+ }
+ break;
+
+ case DB_IPCE_SET_ALL_DEBUG_STATE:
+ {
+ Thread * pThread = pEvent->SetAllDebugState.vmThreadToken.GetRawPtr();
+ CorDebugThreadState debugState = pEvent->SetAllDebugState.debugState;
+
+ LOG((LF_CORDB,LL_INFO10000,"HandleIPCE: SetAllDebugState: except thread 0x%08x (ID:0x%x) to state 0x%x\n",
+ pThread,
+ (pThread != NULL) ? GetThreadIdHelper(pThread) : 0,
+ debugState));
+
+ if (!g_fProcessDetach)
+ {
+ g_pEEInterface->SetAllDebugState(pThread, debugState);
+ }
+
+ STRESS_LOG1(LF_CORDB,LL_INFO10000,"HandleIPC: Got 0x%x back from SetAllDebugState\n", hr);
+
+ // Just send back an HR.
+ DebuggerIPCEvent * pIPCResult = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ PREFIX_ASSUME(pIPCResult != NULL);
+
+ InitIPCEvent(pIPCResult, DB_IPCE_SET_DEBUG_STATE_RESULT, NULL, NULL);
+
+ pIPCResult->hr = S_OK;
+
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_GET_GCHANDLE_INFO:
+ // Given an unvalidated GC-handle, find out all the info about it to view the object
+ // at the other end
+ {
+ OBJECTHANDLE objectHandle = pEvent->GetGCHandleInfo.GCHandle.GetRawPtr();
+
+ DebuggerIPCEvent * pIPCResult = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ PREFIX_ASSUME(pIPCResult != NULL);
+
+ InitIPCEvent(pIPCResult, DB_IPCE_GET_GCHANDLE_INFO_RESULT, NULL, NULL);
+
+ bool fValid = SUCCEEDED(ValidateGCHandle(objectHandle));
+
+ AppDomain * pAppDomain = NULL;
+
+ if(fValid)
+ {
+ // Get the appdomain
+ ADIndex appDomainIndex = HndGetHandleADIndex(objectHandle);
+ pAppDomain = SystemDomain::GetAppDomainAtIndex(appDomainIndex);
+
+ _ASSERTE(pAppDomain != NULL);
+ }
+
+ pIPCResult->hr = S_OK;
+ pIPCResult->GetGCHandleInfoResult.vmAppDomain.SetRawPtr(pAppDomain);
+ pIPCResult->GetGCHandleInfoResult.fValid = fValid;
+
+ m_pRCThread->SendIPCReply();
+
+ }
+ break;
+
+ case DB_IPCE_GET_BUFFER:
+ {
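+            // Allocate a buffer of the requested size in the debuggee on behalf of the right side and reply with its address.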
+ GetAndSendBuffer(m_pRCThread, pEvent->GetBuffer.bufSize);
+ }
+ break;
+
+ case DB_IPCE_RELEASE_BUFFER:
+ {
+ SendReleaseBuffer(m_pRCThread, pEvent->ReleaseBuffer.pBuffer);
+ }
+ break;
+#ifdef EnC_SUPPORTED
+ case DB_IPCE_APPLY_CHANGES:
+ {
+ LOG((LF_ENC, LL_INFO100, "D::HIPCE: DB_IPCE_APPLY_CHANGES 1\n"));
+
+ DebuggerModule * pDebuggerModule = LookupOrCreateModule(pEvent->ApplyChanges.vmDomainFile);
+ //
+ // @todo handle error.
+ //
+ hr = ApplyChangesAndSendResult(pDebuggerModule,
+ pEvent->ApplyChanges.cbDeltaMetadata,
+ (BYTE*) CORDB_ADDRESS_TO_PTR(pEvent->ApplyChanges.pDeltaMetadata),
+ pEvent->ApplyChanges.cbDeltaIL,
+ (BYTE*) CORDB_ADDRESS_TO_PTR(pEvent->ApplyChanges.pDeltaIL));
+
+ LOG((LF_ENC, LL_INFO100, "D::HIPCE: DB_IPCE_APPLY_CHANGES 2\n"));
+ }
+ break;
+#endif // EnC_SUPPORTED
+
+ case DB_IPCE_SET_CLASS_LOAD_FLAG:
+ {
+ DebuggerModule *pDebuggerModule = LookupOrCreateModule(pEvent->SetClassLoad.vmDomainFile);
+
+ _ASSERTE(pDebuggerModule != NULL);
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "D::HIPCE: class load flag is %d for module 0x%p\n",
+ pEvent->SetClassLoad.flag,
+ pDebuggerModule));
+
+ pDebuggerModule->EnableClassLoadCallbacks((BOOL)pEvent->SetClassLoad.flag);
+ }
+ break;
+
+ case DB_IPCE_IS_TRANSITION_STUB:
+ GetAndSendTransitionStubInfo((CORDB_ADDRESS_TYPE*)pEvent->IsTransitionStub.address);
+ break;
+
+ case DB_IPCE_MODIFY_LOGSWITCH:
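+        // Forward the new log switch level and name from the debugger to the EE's logging support.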
+ g_pEEInterface->DebuggerModifyingLogSwitch (pEvent->LogSwitchSettingMessage.iLevel,
+ pEvent->LogSwitchSettingMessage.szSwitchName.GetString());
+
+ break;
+
+ case DB_IPCE_ENABLE_LOG_MESSAGES:
+ {
+ bool fOnOff = pEvent->LogSwitchSettingMessage.iLevel ? true : false;
+ EnableLogMessages (fOnOff);
+ }
+ break;
+
+ case DB_IPCE_SET_IP:
+
+ {
+ // This is a synchronous event (reply required)
+ DebuggerIPCEvent * pIPCResult = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ // Don't have an explicit reply msg
+ InitIPCReply(pIPCResult, DB_IPCE_SET_IP);
+
+ if (temporaryHelp)
+ {
+ pIPCResult->hr = CORDBG_E_NOTREADY;
+ }
+ else if (!g_fProcessDetach)
+ {
+ //
+                // Since these pointers are coming from the RS, they may be NULL or something
+                // unexpected in an OOM situation. Quickly just sanity-check them.
+ //
+ Thread * pThread = pEvent->SetIP.vmThreadToken.GetRawPtr();
+ Module * pModule = pEvent->SetIP.vmDomainFile.GetRawPtr()->GetModule();
+
+ // Get the DJI for this function
+ DebuggerMethodInfo * pDMI = GetOrCreateMethodInfo(pModule, pEvent->SetIP.mdMethod);
+ DebuggerJitInfo * pDJI = NULL;
+ if (pDMI != NULL)
+ {
+ // In the EnC case, if we look for an older version, we need to find the DJI by starting
+                    // address, rather than just by MethodDesc. In the case of generics, we may need to create a DJI,
+                    // so we fall back to FindOrCreate semantics below if this lookup fails.
+ pDJI = pDMI->FindJitInfo(pEvent->SetIP.vmMethodDesc.GetRawPtr(),
+ (TADDR)pEvent->SetIP.startAddress);
+ if (pDJI == NULL)
+ {
+ // In the case of other functions, we may need to lazily create a DJI, so we need
+ // FindOrCreate semantics for those.
+ pDJI = pDMI->FindOrCreateInitAndAddJitInfo(pEvent->SetIP.vmMethodDesc.GetRawPtr());
+ }
+ }
+
+ if ((pDJI != NULL) && (pThread != NULL) && (pModule != NULL))
+ {
+ CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(&(pIPCResult->hr), GetCanary());
+
+ if (SUCCEEDED(pIPCResult->hr))
+ {
+ pIPCResult->hr = SetIP(pEvent->SetIP.fCanSetIPOnly,
+ pThread,
+ pModule,
+ pEvent->SetIP.mdMethod,
+ pDJI,
+ pEvent->SetIP.offset,
+ pEvent->SetIP.fIsIL
+ );
+ }
+ }
+ else
+ {
+ pIPCResult->hr = E_INVALIDARG;
+ }
+ }
+ else
+ {
+ pIPCResult->hr = S_OK;
+ }
+
+ // Send the result
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_DETACH_FROM_PROCESS:
+ LOG((LF_CORDB, LL_INFO10000, "Detaching from process!\n"));
+
+ // Delete all controllers (remove patches etc.)
+ DebuggerController::DeleteAllControllers();
+ // Note that we'd like to be able to do this assert here
+ // _ASSERTE(DebuggerController::GetNumberOfPatches() == 0);
+        // However, controllers may get queued for deletion if there is outstanding
+        // work, and so we can't guarantee the deletion will complete now.
+ // @dbgtodo inspection: This shouldn't be an issue in the complete V3 architecture
+
+ MarkDebuggerUnattachedInternal();
+
+ m_pRCThread->RightSideDetach();
+
+
+ // Clear JMC status
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "Setting all JMC methods to false:\n"));
+ // On detach, set all DMI's JMC status to false.
+ // We have to do this b/c we clear the DebuggerModules and allocated
+ // new ones on re-attach; and the DMI & DM need to be in sync
+ // (in this case, agreeing that JMC-status = false).
+ // This also syncs the EE modules and disables all JMC probes.
+ DebuggerMethodInfoTable * pMethodInfoTable = g_pDebugger->GetMethodInfoTable();
+
+ if (pMethodInfoTable != NULL)
+ {
+ HASHFIND hashFind;
+ DebuggerDataLockHolder debuggerDataLockHolder(this);
+
+ for (DebuggerMethodInfo * pMethodInfo = pMethodInfoTable->GetFirstMethodInfo(&hashFind);
+ pMethodInfo != NULL;
+ pMethodInfo = pMethodInfoTable->GetNextMethodInfo(&hashFind))
+ {
+ pMethodInfo->SetJMCStatus(false);
+ }
+ }
+ LOG((LF_CORDB, LL_EVERYTHING, "Done clearing JMC methods!\n"));
+ }
+
+ // Clean up the hash of DebuggerModules
+ // This method is overridden to also free all DebuggerModule objects
+ if (m_pModules != NULL)
+ {
+
+ // Removes all DebuggerModules
+ DebuggerDataLockHolder ch(this);
+ m_pModules->Clear();
+
+ }
+
+ // Reply to the detach message before we release any Runtime threads. This ensures that the debugger will get
+ // the detach reply before the process exits if the main thread is near exiting.
+ m_pRCThread->SendIPCReply();
+
+ // Let the process run free now... there is no debugger to bother it anymore.
+ fContinue = ResumeThreads(NULL);
+
+ //
+ // Go ahead and release the TSL now that we're continuing. This ensures that we've held
+ // the thread store lock the entire time the Runtime was just stopped.
+ //
+ ThreadSuspend::UnlockThreadStore(FALSE, ThreadSuspend::SUSPEND_FOR_DEBUGGER);
+ break;
+
+#ifndef DACCESS_COMPILE
+
+ case DB_IPCE_FUNC_EVAL:
+ {
+ // This is a synchronous event (reply required)
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ Thread * pThread = pEvent->FuncEval.vmThreadToken.GetRawPtr();
+
+ InitIPCEvent(pEvent, DB_IPCE_FUNC_EVAL_SETUP_RESULT, pThread, pThread->GetDomain());
+
+ BYTE * pbArgDataArea = NULL;
+ DebuggerEval * pDebuggerEvalKey = NULL;
+
+ pEvent->hr = FuncEvalSetup(&(pEvent->FuncEval), &pbArgDataArea, &pDebuggerEvalKey);
+
+ // Send the result of how the func eval setup went.
+ pEvent->FuncEvalSetupComplete.argDataArea = PTR_TO_CORDB_ADDRESS(pbArgDataArea);
+ pEvent->FuncEvalSetupComplete.debuggerEvalKey.Set(pDebuggerEvalKey);
+
+ m_pRCThread->SendIPCReply();
+ }
+
+ break;
+
+#endif
+
+ case DB_IPCE_SET_REFERENCE:
+ {
+ // This is a synchronous event (reply required)
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCReply(pEvent, DB_IPCE_SET_REFERENCE_RESULT);
+
+ pEvent->hr = SetReference(pEvent->SetReference.objectRefAddress,
+ pEvent->SetReference.vmObjectHandle,
+ pEvent->SetReference.newReference);
+
+ // Send the result of how the set reference went.
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_SET_VALUE_CLASS:
+ {
+ // This is a synchronous event (reply required)
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCReply(pEvent, DB_IPCE_SET_VALUE_CLASS_RESULT);
+
+ pEvent->hr = SetValueClass(pEvent->SetValueClass.oldData,
+ pEvent->SetValueClass.newData,
+ &pEvent->SetValueClass.type);
+
+ // Send the result of how the set reference went.
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_GET_THREAD_FOR_TASKID:
+ {
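+            // Walk the thread store looking for the thread whose task ID matches the
+            // request; the reply carries NULL if no such thread exists.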
+ TASKID taskid = pEvent->GetThreadForTaskId.taskid;
+ Thread *pThread = ThreadStore::GetThreadList(NULL);
+ Thread *pThreadRet = NULL;
+
+ while (pThread != NULL)
+ {
+ if (pThread->GetTaskId() == taskid)
+ {
+ pThreadRet = pThread;
+ break;
+ }
+ pThread = ThreadStore::GetThreadList(pThread);
+ }
+
+ // This is a synchronous event (reply required)
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCReply(pEvent, DB_IPCE_GET_THREAD_FOR_TASKID_RESULT);
+
+ pEvent->GetThreadForTaskIdResult.vmThreadToken.SetRawPtr(pThreadRet);
+ pEvent->hr = S_OK;
+
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_CREATE_HANDLE:
+ {
+ Object * pObject = (Object*)pEvent->CreateHandle.objectToken;
+ OBJECTREF objref = ObjectToOBJECTREF(pObject);
+ AppDomain * pAppDomain = pEvent->vmAppDomain.GetRawPtr();
+ BOOL fStrong = pEvent->CreateHandle.fStrong;
+ OBJECTHANDLE objectHandle;
+
+ // This is a synchronous event (reply required)
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCReply(pEvent, DB_IPCE_CREATE_HANDLE_RESULT);
+
+ {
+ // Handle creation may need to allocate memory.
+                // The API specifically limits the number of handles Cordbg can create,
+ // so we could preallocate and fail allocating anything beyond that.
+ CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(&(pEvent->hr), GetCanary());
+
+ if (SUCCEEDED(pEvent->hr))
+ {
+ if (fStrong == TRUE)
+ {
+ // create strong handle
+ objectHandle = pAppDomain->CreateStrongHandle(objref);
+ }
+ else
+ {
+ // create the weak long handle
+ objectHandle = pAppDomain->CreateLongWeakHandle(objref);
+ }
+ pEvent->CreateHandleResult.vmObjectHandle.SetRawPtr(objectHandle);
+ }
+ }
+
+ m_pRCThread->SendIPCReply();
+ break;
+ }
+
+ case DB_IPCE_DISPOSE_HANDLE:
+ {
+ // DISPOSE an object handle
+ OBJECTHANDLE objectHandle = pEvent->DisposeHandle.vmObjectHandle.GetRawPtr();
+
+ if (pEvent->DisposeHandle.fStrong == TRUE)
+ {
+ DestroyStrongHandle(objectHandle);
+ }
+ else
+ {
+ DestroyLongWeakHandle(objectHandle);
+ }
+ break;
+ }
+
+#ifndef DACCESS_COMPILE
+
+ case DB_IPCE_FUNC_EVAL_ABORT:
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::HIPCE: Got FuncEvalAbort for pDE:%08x\n",
+ pEvent->FuncEvalAbort.debuggerEvalKey.UnWrap()));
+
+ // This is a synchronous event (reply required)
+
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+ InitIPCReply(pEvent,DB_IPCE_FUNC_EVAL_ABORT_RESULT);
+
+ pEvent->hr = FuncEvalAbort(pEvent->FuncEvalAbort.debuggerEvalKey.UnWrap());
+
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_FUNC_EVAL_RUDE_ABORT:
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::HIPCE: Got FuncEvalRudeAbort for pDE:%08x\n",
+ pEvent->FuncEvalRudeAbort.debuggerEvalKey.UnWrap()));
+
+ // This is a synchronous event (reply required)
+
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCReply(pEvent, DB_IPCE_FUNC_EVAL_RUDE_ABORT_RESULT);
+
+ pEvent->hr = FuncEvalRudeAbort(pEvent->FuncEvalRudeAbort.debuggerEvalKey.UnWrap());
+
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_FUNC_EVAL_CLEANUP:
+
+ // This is a synchronous event (reply required)
+
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCReply(pEvent,DB_IPCE_FUNC_EVAL_CLEANUP_RESULT);
+
+ pEvent->hr = FuncEvalCleanup(pEvent->FuncEvalCleanup.debuggerEvalKey.UnWrap());
+
+ m_pRCThread->SendIPCReply();
+
+ break;
+
+#endif
+
+ case DB_IPCE_CONTROL_C_EVENT_RESULT:
+ {
+ // store the result of whether the event has been handled by the debugger and
+ // wake up the thread waiting for the result
+ SetDebuggerHandlingCtrlC(pEvent->hr == S_OK);
+ VERIFY(SetEvent(GetCtrlCMutex()));
+ }
+ break;
+
+    // Set the JMC status on individual methods
+ case DB_IPCE_SET_METHOD_JMC_STATUS:
+ {
+ // Get the info out of the event
+ DebuggerModule * pDebuggerModule = LookupOrCreateModule(pEvent->SetJMCFunctionStatus.vmDomainFile);
+ Module * pModule = pDebuggerModule->GetRuntimeModule();
+
+ bool fStatus = (pEvent->SetJMCFunctionStatus.dwStatus != 0);
+
+ mdMethodDef token = pEvent->SetJMCFunctionStatus.funcMetadataToken;
+
+ // Prepare reply
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCEvent(pEvent, DB_IPCE_SET_METHOD_JMC_STATUS_RESULT, NULL, NULL);
+
+ pEvent->hr = S_OK;
+
+ if (pDebuggerModule->HasAnyOptimizedCode() && fStatus)
+ {
+                // If there's optimized code, then we can't set JMC status to true.
+ // That's because JMC probes are not injected in optimized code, and we
+ // need a JMC probe to have a JMC function.
+ pEvent->hr = CORDBG_E_CANT_SET_TO_JMC;
+ }
+ else
+ {
+ DebuggerDataLockHolder debuggerDataLockHolder(this);
+ // This may be called on an unjitted method, so we may
+ // have to create the MethodInfo.
+ DebuggerMethodInfo * pMethodInfo = GetOrCreateMethodInfo(pModule, token);
+
+ if (pMethodInfo == NULL)
+ {
+ pEvent->hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ // Update the storage on the LS
+ pMethodInfo->SetJMCStatus(fStatus);
+ }
+ }
+
+ // Send reply
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ // Get the JMC status on a given function
+ case DB_IPCE_GET_METHOD_JMC_STATUS:
+ {
+ // Get the method
+ DebuggerModule * pDebuggerModule = LookupOrCreateModule(pEvent->SetJMCFunctionStatus.vmDomainFile);
+
+ Module * pModule = pDebuggerModule->GetRuntimeModule();
+
+ mdMethodDef token = pEvent->SetJMCFunctionStatus.funcMetadataToken;
+
+ // Init reply
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+ InitIPCEvent(pEvent, DB_IPCE_GET_METHOD_JMC_STATUS_RESULT, NULL, NULL);
+
+ //
+ // This may be called on an unjitted method, so we may
+ // have to create the MethodInfo.
+ //
+ DebuggerMethodInfo * pMethodInfo = GetOrCreateMethodInfo(pModule, token);
+
+ if (pMethodInfo == NULL)
+ {
+ pEvent->hr = E_OUTOFMEMORY;
+ }
+ else
+ {
+ bool fStatus = pMethodInfo->IsJMCFunction();
+ pEvent->SetJMCFunctionStatus.dwStatus = fStatus;
+ pEvent->hr = S_OK;
+ }
+
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+ case DB_IPCE_SET_MODULE_JMC_STATUS:
+ {
+ // Get data out of event
+ DebuggerModule * pDebuggerModule = LookupOrCreateModule(pEvent->SetJMCFunctionStatus.vmDomainFile);
+
+ bool fStatus = (pEvent->SetJMCFunctionStatus.dwStatus != 0);
+
+ // Prepare reply
+ pEvent = m_pRCThread->GetIPCEventReceiveBuffer();
+
+ InitIPCReply(pEvent, DB_IPCE_SET_MODULE_JMC_STATUS_RESULT);
+
+ pEvent->hr = S_OK;
+
+ if (pDebuggerModule->HasAnyOptimizedCode() && fStatus)
+ {
+                // If there's optimized code, then we can't set JMC status to true.
+ // That's because JMC probes are not injected in optimized code, and we
+ // need a JMC probe to have a JMC function.
+ pEvent->hr = CORDBG_E_CANT_SET_TO_JMC;
+ }
+ else
+ {
+ g_pDebugger->SetModuleDefaultJMCStatus(pDebuggerModule->GetRuntimeModule(), fStatus);
+ }
+
+
+
+ // Send reply
+ m_pRCThread->SendIPCReply();
+ }
+ break;
+
+
+ case DB_IPCE_INTERCEPT_EXCEPTION:
+ GetAndSendInterceptCommand(pEvent);
+ break;
+
+ case DB_IPCE_RESOLVE_UPDATE_METADATA_1:
+ {
+
+ LOG((LF_CORDB, LL_INFO10000, "D::HIPCE Handling DB_IPCE_RESOLVE_UPDATE_METADATA_1\n"));
+            // This isn't ideal - making SerializeModuleMetaData not call new is hard,
+            // but the odds of trying to load a module after a thread was stopped while
+            // holding the heap lock should be pretty low.
+ // All of the metadata calls can violate this and call new.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ Module * pModule = pEvent->MetadataUpdateRequest.vmModule.GetRawPtr();
+ LOG((LF_CORDB, LL_INFO100000, "D::HIPCE Got module 0x%x\n", pModule));
+
+ DWORD countBytes = 0;
+
+ // This will allocate memory. Debugger will then copy from here and send a
+ // DB_IPCE_RESOLVE_UPDATE_METADATA_2 to free this memory.
+ BYTE* pData = NULL;
+ EX_TRY
+ {
+ LOG((LF_CORDB, LL_INFO100000, "D::HIPCE Calling SerializeModuleMetaData\n"));
+ pData = SerializeModuleMetaData(pModule, &countBytes);
+
+ }
+ EX_CATCH_HRESULT(hr);
+
+ LOG((LF_CORDB, LL_INFO100000, "D::HIPCE hr is 0x%x\n", hr));
+
+ DebuggerIPCEvent * pResult = m_pRCThread->GetIPCEventReceiveBuffer();
+ InitIPCEvent(pResult, DB_IPCE_RESOLVE_UPDATE_METADATA_1_RESULT, NULL, NULL);
+
+ pResult->MetadataUpdateRequest.pMetadataStart = pData;
+ pResult->MetadataUpdateRequest.nMetadataSize = countBytes;
+ pResult->hr = hr;
+ LOG((LF_CORDB, LL_INFO1000000, "D::HIPCE metadataStart=0x%x, nMetadataSize=0x%x\n", pData, countBytes));
+
+ m_pRCThread->SendIPCReply();
+ LOG((LF_CORDB, LL_INFO1000000, "D::HIPCE reply sent\n"));
+ }
+ break;
+
+ case DB_IPCE_RESOLVE_UPDATE_METADATA_2:
+ {
+ // Delete memory allocated with DB_IPCE_RESOLVE_UPDATE_METADATA_1.
+ BYTE * pData = (BYTE *) pEvent->MetadataUpdateRequest.pMetadataStart;
+ DeleteInteropSafe(pData);
+
+ DebuggerIPCEvent * pResult = m_pRCThread->GetIPCEventReceiveBuffer();
+ InitIPCEvent(pResult, DB_IPCE_RESOLVE_UPDATE_METADATA_2_RESULT, NULL, NULL);
+ pResult->hr = S_OK;
+ m_pRCThread->SendIPCReply();
+ }
+
+ break;
+
+#ifdef FEATURE_LEGACYNETCF_DBG_HOST_CONTROL
+ case DB_IPCE_NETCF_HOST_CONTROL_PAUSE:
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::HIPCE Handling DB_IPCE_NETCF_HOST_CONTROL_PAUSE\n"));
+ InvokeLegacyNetCFHostPauseCallback();
+
+ DebuggerIPCEvent * pResult = m_pRCThread->GetIPCEventReceiveBuffer();
+ InitIPCEvent(pResult, DB_IPCE_NETCF_HOST_CONTROL_PAUSE_RESULT, NULL, NULL);
+ pResult->hr = S_OK;
+ m_pRCThread->SendIPCReply();
+ }
+
+ break;
+
+ case DB_IPCE_NETCF_HOST_CONTROL_RESUME:
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::HIPCE Handling DB_IPCE_NETCF_HOST_CONTROL_RESUME\n"));
+ InvokeLegacyNetCFHostResumeCallback();
+
+ DebuggerIPCEvent * pResult = m_pRCThread->GetIPCEventReceiveBuffer();
+ InitIPCEvent(pResult, DB_IPCE_NETCF_HOST_CONTROL_RESUME_RESULT, NULL, NULL);
+ pResult->hr = S_OK;
+ m_pRCThread->SendIPCReply();
+ }
+
+ break;
+#endif // FEATURE_LEGACYNETCF_DBG_HOST_CONTROL
+
+ default:
+ // We should never get an event that we don't know about.
+ CONSISTENCY_CHECK_MSGF(false, ("Unknown Debug-Event on LS:id=0x%08x.", pEvent->type));
+ LOG((LF_CORDB, LL_INFO10000, "Unknown event type: 0x%08x\n",
+ pEvent->type));
+ }
+
+ STRESS_LOG0(LF_CORDB, LL_INFO10000, "D::HIPCE: finished handling event\n");
+
+ // dbgLockHolder goes out of scope - implicit Release
+ return fContinue;
+}
+#ifdef _PREFAST_
+#pragma warning(pop)
+#endif
+
+/*
+ * GetAndSendInterceptCommand
+ *
+ * This function processes an INTERCEPT_EXCEPTION IPC event, sending the appropriate response.
+ *
+ * Parameters:
+ * event - the event to process.
+ *
+ * Returns:
+ * hr - HRESULT.
+ *
+ */
+HRESULT Debugger::GetAndSendInterceptCommand(DebuggerIPCEvent *event)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ _ASSERTE((event->type & DB_IPCE_TYPE_MASK) == DB_IPCE_INTERCEPT_EXCEPTION);
+
+ //
+ // Simple state validation first.
+ //
+ Thread *pThread = event->InterceptException.vmThreadToken.GetRawPtr();
+
+ if ((pThread != NULL) &&
+ !m_forceNonInterceptable &&
+ IsInterceptableException(pThread))
+ {
+ ThreadExceptionState* pExState = pThread->GetExceptionState();
+
+ // We can only have one interception going on at any given time.
+ if (!pExState->GetFlags()->DebuggerInterceptInfo())
+ {
+ //
+ // Now start processing the parameters from the event.
+ //
+ FramePointer targetFramePointer = event->InterceptException.frameToken;
+
+ ControllerStackInfo csi;
+
+ // Safe because we're stopped.
+ StackTraceTicket ticket(pThread);
+ csi.GetStackInfo(ticket, pThread, targetFramePointer, NULL);
+
+ if (csi.m_targetFrameFound)
+ {
+ //
+ // If the target frame is below the point where the current exception was
+ // thrown from, then we should reject this interception command. This
+ // can happen in a func-eval during an exception callback, or during a
+ // breakpoint in a filter function. Or it can just be a user error.
+ //
+ CONTEXT* pContext = pExState->GetContextRecord();
+
+ // This is an approximation on IA64, where we should use the caller SP instead of
+ // the current SP. However, if the targetFramePointer is valid, the comparison should
+ // still work. targetFramePointer should be valid because it ultimately comes from a
+ // full stackwalk.
+ FramePointer excepFramePointer = FramePointer::MakeFramePointer(GetSP(pContext));
+
+ if (IsCloserToRoot(excepFramePointer, targetFramePointer))
+ {
+ hr = CORDBG_E_CURRENT_EXCEPTION_IS_OUTSIDE_CURRENT_EXECUTION_SCOPE;
+ goto LSendResponse;
+ }
+
+
+ //
+                // If the instruction that faulted is not in this managed code at the leaf
+                // frame, then the IP is actually the return address from the managed or
+                // unmanaged function that really did fault. Thus, we actually want the
+                // IP of the call instruction. I fake this by simply subtracting 1 from
+                // the IP, which is a close enough approximation for the search below.
+ //
+ if (pExState->GetContextRecord() != NULL)
+ {
+ // If the faulting instruction is not in managed code, then the interception frame
+ // must be non-leaf.
+ if (!g_pEEInterface->IsManagedNativeCode((BYTE *)(GetIP(pExState->GetContextRecord()))))
+ {
+ csi.m_activeFrame.relOffset--;
+ }
+ else
+ {
+ MethodDesc *pMethodDesc = g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(GetIP(pExState->GetContextRecord())));
+
+ // check if the interception frame is the leaf frame
+ if ((pMethodDesc == NULL) ||
+ (pMethodDesc != csi.m_activeFrame.md) ||
+ (GetSP(pExState->GetContextRecord()) != GetRegdisplaySP(&(csi.m_activeFrame.registers))))
+ {
+ csi.m_activeFrame.relOffset--;
+ }
+ }
+ }
+
+ //
+ // Now adjust the IP to be the previous zero-stack depth sequence point.
+ //
+ SIZE_T foundOffset = 0;
+ DebuggerJitInfo *pJitInfo = csi.m_activeFrame.GetJitInfoFromFrame();
+
+ if (pJitInfo != NULL)
+ {
+ ICorDebugInfo::SourceTypes src;
+
+ ULONG relOffset = csi.m_activeFrame.relOffset;
+
+#if defined(WIN64EXCEPTIONS)
+ int funcletIndex = PARENT_METHOD_INDEX;
+
+ // For funclets, we need to make sure that the stack empty sequence point we use is
+ // in the same funclet as the current offset.
+ if (csi.m_activeFrame.IsFuncletFrame())
+ {
+ funcletIndex = pJitInfo->GetFuncletIndex(relOffset, DebuggerJitInfo::GFIM_BYOFFSET);
+ }
+
+ // Refer to the loop using pMap below.
+ DebuggerILToNativeMap* pMap = NULL;
+#endif // WIN64EXCEPTIONS
+
+ for (unsigned int i = 0; i < pJitInfo->GetSequenceMapCount(); i++)
+ {
+ SIZE_T startOffset = pJitInfo->GetSequenceMap()[i].nativeStartOffset;
+
+ if (DbgIsSpecialILOffset(pJitInfo->GetSequenceMap()[i].ilOffset))
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "D::HIPCE: not placing breakpoint at special offset 0x%x\n", startOffset));
+ continue;
+ }
+
+ if ((i >= 1) && (startOffset == pJitInfo->GetSequenceMap()[i-1].nativeStartOffset))
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "D::HIPCE: not placing redundant breakpoint at duplicate offset 0x%x\n", startOffset));
+ continue;
+ }
+
+ if (startOffset > relOffset)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "D::HIPCE: Stopping scan for breakpoint at offset 0x%x\n", startOffset));
+ continue;
+ }
+
+ src = pJitInfo->GetSequenceMap()[i].source;
+
+ if (!(src & ICorDebugInfo::STACK_EMPTY))
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::HIPCE: not placing E&C breakpoint at offset "
+ "0x%x b/c not STACK_EMPTY:it's 0x%x\n", startOffset, src));
+ continue;
+ }
+
+ if ((foundOffset < startOffset) && (startOffset <= relOffset)
+#if defined(WIN64EXCEPTIONS)
+ // Check if we are still in the same funclet.
+ && (funcletIndex == pJitInfo->GetFuncletIndex(startOffset, DebuggerJitInfo::GFIM_BYOFFSET))
+#endif // WIN64EXCEPTIONS
+ )
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::HIPCE: updating breakpoint at native offset 0x%x\n",
+ startOffset));
+ foundOffset = startOffset;
+#if defined(WIN64EXCEPTIONS)
+ // Save the map entry for modification later.
+ pMap = &(pJitInfo->GetSequenceMap()[i]);
+#endif // WIN64EXCEPTIONS
+ }
+ }
+
+#if defined(WIN64EXCEPTIONS)
+                    // This is nasty. Starting recently we could have multiple sequence points with the same IL offset
+                    // in the SAME funclet/parent method (previously, different sequence points with the same IL offset
+                    // implied that they were in different funclets/parent methods). Fortunately, we only run into this
+                    // if we have a loop which throws a range-check-failed exception. The code for throwing the
+                    // exception executes out of line (this is JIT-specific, of course). The following loop makes sure
+                    // that when we intercept the exception, we intercept it at the smallest native offset instead
+                    // of intercepting it right before we throw the exception.
+ for (/* no initialization */; pMap > pJitInfo->GetSequenceMap() ; pMap--)
+ {
+ if (pMap->ilOffset == (pMap-1)->ilOffset)
+ {
+ foundOffset = (pMap-1)->nativeStartOffset;
+ }
+ else
+ {
+ break;
+ }
+ }
+ _ASSERTE(foundOffset < relOffset);
+#endif // WIN64EXCEPTIONS
+
+ //
+ // Set up a breakpoint on the intercept IP
+ //
+ DebuggerContinuableExceptionBreakpoint *pBreakpoint;
+
+ pBreakpoint = new (interopsafe, nothrow) DebuggerContinuableExceptionBreakpoint(pThread,
+ foundOffset,
+ pJitInfo,
+ csi.m_activeFrame.currentAppDomain
+ );
+
+ if (pBreakpoint != NULL)
+ {
+ //
+ // Set up the VM side of intercepting.
+ //
+ if (pExState->GetDebuggerState()->SetDebuggerInterceptInfo(csi.m_activeFrame.pIJM,
+ pThread,
+ csi.m_activeFrame.MethodToken,
+ csi.m_activeFrame.md,
+ foundOffset,
+#ifdef _TARGET_ARM_
+ // ARM requires the caller stack pointer, not the current stack pointer
+ CallerStackFrame::FromRegDisplay(&(csi.m_activeFrame.registers)),
+#else
+ StackFrame::FromRegDisplay(&(csi.m_activeFrame.registers)),
+#endif
+ pExState->GetFlags()
+ ))
+ {
+ //
+ // Make sure no more exception callbacks come thru.
+ //
+ pExState->GetFlags()->SetSentDebugFirstChance();
+ pExState->GetFlags()->SetSentDebugUserFirstChance();
+ pExState->GetFlags()->SetSentDebugUnwindBegin();
+
+ //
+ // Save off this breakpoint, so that if the exception gets unwound before we hit
+                            // the breakpoint - the exception info can call back to remove it.
+ //
+ pExState->GetDebuggerState()->SetDebuggerInterceptContext((void *)pBreakpoint);
+
+ hr = S_OK;
+ }
+ else // VM could not set up for intercept
+ {
+ DeleteInteropSafe(pBreakpoint);
+ hr = E_INVALIDARG;
+ }
+
+ }
+ else // could not allocate for breakpoint
+ {
+ hr = E_OUTOFMEMORY;
+ }
+
+ }
+ else // could not get JitInfo
+ {
+ hr = E_FAIL;
+ }
+
+ }
+ else // target frame not found.
+ {
+ hr = E_INVALIDARG;
+ }
+
+ }
+ else // already set up for an intercept.
+ {
+ hr = CORDBG_E_INTERCEPT_FRAME_ALREADY_SET;
+ }
+
+ }
+ else if (pThread == NULL)
+ {
+ hr = E_INVALIDARG; // pThread is NULL.
+ }
+ else
+ {
+ hr = CORDBG_E_NONINTERCEPTABLE_EXCEPTION;
+ }
+
+LSendResponse:
+
+ //
+ // Prepare reply
+ //
+ event = m_pRCThread->GetIPCEventReceiveBuffer();
+ InitIPCReply(event, DB_IPCE_INTERCEPT_EXCEPTION_RESULT);
+ event->hr = hr;
+
+ //
+ // Send reply
+ //
+ m_pRCThread->SendIPCReply();
+
+ return hr;
+}
+
+// Poll & wait for the real helper thread to come up.
+// It's possible that the helper thread is blocked by DllMain, and so we can't
+// wait forever. If this poll does time out, it just means we're likely to
+// go do helper duty instead of having the real helper do it.
+void Debugger::PollWaitingForHelper()
+{
+
+ LOG((LF_CORDB, LL_INFO10000, "PollWaitingForHelper() start\n"));
+
+ DebuggerIPCControlBlock * pDCB = g_pRCThread->GetDCB();
+
+ PREFIX_ASSUME(pDCB != NULL);
+
+ int nTotalMSToWait = 8 * 1000;
+
+ // Spin waiting for either the real helper thread or a temp. to be ready.
+ // This should never timeout unless the helper is blocked on the loader lock.
+ while (!pDCB->m_helperThreadId && !pDCB->m_temporaryHelperThreadId)
+ {
+ STRESS_LOG1(LF_CORDB,LL_INFO1000, "PollWaitForHelper. %d\n", nTotalMSToWait);
+
+ // If we hold the lock, we'll block the helper thread and this poll is not useful
+ _ASSERTE(!ThreadHoldsLock());
+
+ const DWORD dwTime = 50;
+ ClrSleepEx(dwTime, FALSE);
+ nTotalMSToWait -= dwTime;
+
+ if (nTotalMSToWait <= 0)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "PollWaitingForHelper() timeout\n"));
+ return;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "PollWaitingForHelper() succeed\n"));
+ return;
+}
+
+
+
+
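+//
+// Convert a left-side TypeHandle into the basic right-side type information: constructed
+// types (arrays, pointers, byrefs, function pointers and instantiated generics) carry a
+// wrapped type handle, while plain classes and value types are identified by their
+// metadata token and module.
+//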
+void Debugger::TypeHandleToBasicTypeInfo(AppDomain *pAppDomain, TypeHandle th, DebuggerIPCE_BasicTypeData *res)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::THTBTI: converting left-side type handle to basic right-side type info, ELEMENT_TYPE: %d.\n", th.GetSignatureCorElementType()));
+ // GetSignatureCorElementType returns E_T_CLASS for E_T_STRING... :-(
+ if (th.IsNull())
+ {
+ res->elementType = ELEMENT_TYPE_VOID;
+ }
+ else if (th.GetMethodTable() == g_pObjectClass)
+ {
+ res->elementType = ELEMENT_TYPE_OBJECT;
+ }
+ else if (th.GetMethodTable() == g_pStringClass)
+ {
+ res->elementType = ELEMENT_TYPE_STRING;
+ }
+ else
+ {
+ res->elementType = th.GetSignatureCorElementType();
+ }
+
+ switch (res->elementType)
+ {
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_FNPTR:
+ case ELEMENT_TYPE_BYREF:
+ res->vmTypeHandle = WrapTypeHandle(th);
+ res->metadataToken = mdTokenNil;
+ res->vmDomainFile.SetRawPtr(NULL);
+ break;
+
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ res->vmTypeHandle = th.HasInstantiation() ? WrapTypeHandle(th) : VMPTR_TypeHandle::NullPtr();
+ // only set if instantiated
+ res->metadataToken = th.GetCl();
+ DebuggerModule * pDModule = LookupOrCreateModule(th.GetModule(), pAppDomain);
+ res->vmDomainFile.SetRawPtr((pDModule ? pDModule->GetDomainFile() : NULL));
+ break;
+ }
+
+ default:
+ res->vmTypeHandle = VMPTR_TypeHandle::NullPtr();
+ res->metadataToken = mdTokenNil;
+ res->vmDomainFile.SetRawPtr(NULL);
+ break;
+ }
+ return;
+}
+
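+//
+// Convert a left-side TypeHandle into the expanded right-side type information,
+// describing one level of element/argument types. Depending on the boxing mode,
+// value types (and, for AllBoxed, everything else) are reported as ELEMENT_TYPE_CLASS.
+//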
+void Debugger::TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed,
+ AppDomain *pAppDomain,
+ TypeHandle th,
+ DebuggerIPCE_ExpandedTypeData *res)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (th.IsNull())
+ {
+ res->elementType = ELEMENT_TYPE_VOID;
+ }
+ else if (th.GetMethodTable() == g_pObjectClass)
+ {
+ res->elementType = ELEMENT_TYPE_OBJECT;
+ }
+ else if (th.GetMethodTable() == g_pStringClass)
+ {
+ res->elementType = ELEMENT_TYPE_STRING;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::THTETI: converting left-side type handle to expanded right-side type info, ELEMENT_TYPE: %d.\n", th.GetSignatureCorElementType()));
+ // GetSignatureCorElementType returns E_T_CLASS for E_T_STRING... :-(
+ res->elementType = th.GetSignatureCorElementType();
+ }
+
+ switch (res->elementType)
+ {
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ _ASSERTE(th.IsArray());
+ res->ArrayTypeData.arrayRank = th.AsArray()->GetRank();
+ TypeHandleToBasicTypeInfo(pAppDomain,
+ th.AsArray()->GetArrayElementTypeHandle(),
+ &(res->ArrayTypeData.arrayTypeArg));
+ break;
+
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ if (boxed == AllBoxed)
+ {
+ res->elementType = ELEMENT_TYPE_CLASS;
+ goto treatAllValuesAsBoxed;
+ }
+ _ASSERTE(th.IsTypeDesc());
+ TypeHandleToBasicTypeInfo(pAppDomain,
+ th.AsTypeDesc()->GetTypeParam(),
+ &(res->UnaryTypeData.unaryTypeArg));
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ if (boxed == OnlyPrimitivesUnboxed || boxed == AllBoxed)
+ res->elementType = ELEMENT_TYPE_CLASS;
+ // drop through
+
+ case ELEMENT_TYPE_CLASS:
+ {
+treatAllValuesAsBoxed:
+ res->ClassTypeData.typeHandle = th.HasInstantiation() ? WrapTypeHandle(th) : VMPTR_TypeHandle::NullPtr(); // only set if instantiated
+ res->ClassTypeData.metadataToken = th.GetCl();
+ DebuggerModule * pModule = LookupOrCreateModule(th.GetModule(), pAppDomain);
+ res->ClassTypeData.vmDomainFile.SetRawPtr((pModule ? pModule->GetDomainFile() : NULL));
+ _ASSERTE(!res->ClassTypeData.vmDomainFile.IsNull());
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ if (boxed == AllBoxed)
+ {
+ res->elementType = ELEMENT_TYPE_CLASS;
+ goto treatAllValuesAsBoxed;
+ }
+ res->NaryTypeData.typeHandle = WrapTypeHandle(th);
+ break;
+ }
+ default:
+ // The element type is sufficient, unless the type is effectively a "boxed"
+ // primitive value type...
+ if (boxed == AllBoxed)
+ {
+ res->elementType = ELEMENT_TYPE_CLASS;
+ goto treatAllValuesAsBoxed;
+ }
+ break;
+ }
+ LOG((LF_CORDB, LL_INFO10000, "D::THTETI: converted left-side type handle to expanded right-side type info, res->ClassTypeData.typeHandle = 0x%08x.\n", res->ClassTypeData.typeHandle.GetRawPtr()));
+ return;
+}
+
+
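+// Inverse of TypeHandleToBasicTypeInfo: resolve a basic right-side type description back
+// to a left-side TypeHandle. Returns CORDBG_E_CLASS_NOT_LOADED if the type is not already loaded.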
+HRESULT Debugger::BasicTypeInfoToTypeHandle(DebuggerIPCE_BasicTypeData *data, TypeHandle *pRes)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::BTITTH: expanding basic right-side type to left-side type, ELEMENT_TYPE: %d.\n", data->elementType));
+ *pRes = TypeHandle();
+ TypeHandle th;
+ switch (data->elementType)
+ {
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ _ASSERTE(!data->vmTypeHandle.IsNull());
+ th = GetTypeHandle(data->vmTypeHandle);
+ break;
+
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ if (!data->vmTypeHandle.IsNull())
+ {
+ th = GetTypeHandle(data->vmTypeHandle);
+ }
+ else
+ {
+ DebuggerModule *pDebuggerModule = g_pDebugger->LookupOrCreateModule(data->vmDomainFile);
+
+ th = g_pEEInterface->FindLoadedClass(pDebuggerModule->GetRuntimeModule(), data->metadataToken);
+ if (th.IsNull())
+ {
+ LOG((LF_CORDB, LL_INFO10000, "D::ETITTH: class isn't loaded.\n"));
+ return CORDBG_E_CLASS_NOT_LOADED;
+ }
+
+ _ASSERTE(th.GetNumGenericArgs() == 0);
+ }
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ _ASSERTE(!data->vmTypeHandle.IsNull());
+ th = GetTypeHandle(data->vmTypeHandle);
+ break;
+ }
+
+ default:
+ th = g_pEEInterface->FindLoadedElementType(data->elementType);
+ break;
+ }
+ if (th.IsNull())
+ return CORDBG_E_CLASS_NOT_LOADED;
+ *pRes = th;
+ return S_OK;
+}
+
+// Iterate through the type argument data, creating type handles as we go.
+void Debugger::TypeDataWalk::ReadTypeHandles(unsigned int nTypeArgs, TypeHandle *ppResults)
+{
+ WRAPPER_NO_CONTRACT;
+
+ for (unsigned int i = 0; i < nTypeArgs; i++)
+ ppResults[i] = ReadTypeHandle();
+}
+
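+// Read nTypeArgs type handles from the walk and use them as the instantiation when loading
+// the type for the given typedef token in the given module. Throws if the resulting
+// instantiation is invalid or cannot be loaded.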
+TypeHandle Debugger::TypeDataWalk::ReadInstantiation(Module *pModule, mdTypeDef tok, unsigned int nTypeArgs)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD dwAllocSize;
+ if (!ClrSafeInt<DWORD>::multiply(nTypeArgs, sizeof(TypeHandle), dwAllocSize))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ TypeHandle * inst = (TypeHandle *) _alloca(dwAllocSize);
+    ReadTypeHandles(nTypeArgs, inst);
+ TypeHandle th = g_pEEInterface->LoadInstantiation(pModule, tok, nTypeArgs, inst);
+ if (th.IsNull())
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericArg"));
+ return th;
+}
+
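+// Read a single node from the type-argument stream and build the corresponding left-side
+// TypeHandle. The stream is effectively a flattened pre-order tree: each node carries an
+// element type plus numTypeArgs children, so arrays/pointers/byrefs recurse on their one
+// type parameter, classes and value types read a full instantiation, and function pointers
+// read each signature type as a child.
+// Illustrative example (hypothetical data, for orientation only): a List<int[]> argument
+// would arrive as CLASS(List`1, numTypeArgs=1) -> SZARRAY(numTypeArgs=1) -> I4.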
+TypeHandle Debugger::TypeDataWalk::ReadTypeHandle()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ DebuggerIPCE_TypeArgData * data = ReadOne();
+ if (!data)
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericArg"));
+
+ LOG((LF_CORDB, LL_INFO10000, "D::ETITTH: expanding right-side type to left-side type, ELEMENT_TYPE: %d.\n", data->data.elementType));
+
+ TypeHandle th;
+ CorElementType et = data->data.elementType;
+ switch (et)
+ {
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ if(data->numTypeArgs == 1)
+ {
+ TypeHandle typar = ReadTypeHandle();
+ switch (et)
+ {
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ th = g_pEEInterface->LoadArrayType(data->data.elementType, typar, data->data.ArrayTypeData.arrayRank);
+ break;
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ th = g_pEEInterface->LoadPointerOrByrefType(data->data.elementType, typar);
+ break;
+ default:
+ _ASSERTE(0);
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ DebuggerModule *pDebuggerModule = g_pDebugger->LookupOrCreateModule(data->data.ClassTypeData.vmDomainFile);
+ th = ReadInstantiation(pDebuggerModule->GetRuntimeModule(), data->data.ClassTypeData.metadataToken, data->numTypeArgs);
+ break;
+ }
+
+ case ELEMENT_TYPE_FNPTR:
+ {
+ SIZE_T cbAllocSize;
+ if ((!ClrSafeInt<SIZE_T>::multiply(data->numTypeArgs, sizeof(TypeHandle), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+                // Overflow computing the allocation size for the FNPTR type arguments;
+                // throw (as ReadInstantiation does) rather than asserting a constant and
+                // over-allocating on the stack.
+                ThrowHR(COR_E_OVERFLOW);
+ }
+ TypeHandle * inst = (TypeHandle *) _alloca(cbAllocSize);
+            ReadTypeHandles(data->numTypeArgs, inst);
+ th = g_pEEInterface->LoadFnptrType(inst, data->numTypeArgs);
+ break;
+ }
+
+ default:
+ th = g_pEEInterface->LoadElementType(data->data.elementType);
+ break;
+ }
+ if (th.IsNull())
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Type"));
+ return th;
+
+}
+
+//
+// GetAndSendTransitionStubInfo figures out if an address is a stub
+// address and sends the result back to the right side.
+//
+void Debugger::GetAndSendTransitionStubInfo(CORDB_ADDRESS_TYPE *stubAddress)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::GASTSI: IsTransitionStub. Addr=0x%08x\n", stubAddress));
+
+ bool result = false;
+
+ result = g_pEEInterface->IsStub((const BYTE *)stubAddress);
+
+
+    // If it's not a stub, then maybe it's an address in mscoree?
+ if (result == false)
+ {
+ result = (IsIPInModule(g_pMSCorEE, (PCODE)stubAddress) == TRUE);
+ }
+
+ // This is a synchronous event (reply required)
+ DebuggerIPCEvent *event = m_pRCThread->GetIPCEventReceiveBuffer();
+ InitIPCEvent(event, DB_IPCE_IS_TRANSITION_STUB_RESULT, NULL, NULL);
+ event->IsTransitionStubResult.isStub = result;
+
+ // Send the result
+ m_pRCThread->SendIPCReply();
+}
+
+/*
+ * A generic request for a buffer in the left-side for use by the right-side
+ *
+ * This is a synchronous event (reply required).
+ */
+HRESULT Debugger::GetAndSendBuffer(DebuggerRCThread* rcThread, ULONG bufSize)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // This is a synchronous event (reply required)
+ DebuggerIPCEvent* event = rcThread->GetIPCEventReceiveBuffer();
+ PREFIX_ASSUME(event != NULL);
+ InitIPCEvent(event, DB_IPCE_GET_BUFFER_RESULT, NULL, NULL);
+
+ // Allocate the buffer
+ event->GetBufferResult.hr = AllocateRemoteBuffer( bufSize, &event->GetBufferResult.pBuffer );
+
+ // Send the result
+ return rcThread->SendIPCReply();
+}
+
+/*
+ * Allocate a buffer in the left-side for use by the right-side
+ */
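+//
+// Illustrative usage sketch (not taken from a specific caller; pairs with
+// ReleaseRemoteBuffer below):
+//
+//     void *pRemote = NULL;
+//     HRESULT hr = AllocateRemoteBuffer(cbNeeded, &pRemote);
+//     if (SUCCEEDED(hr))
+//     {
+//         // ... the right side reads/writes the buffer at the returned address ...
+//         ReleaseRemoteBuffer(pRemote, true); // true: also remove it from the blob list
+//     }
+//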
+HRESULT Debugger::AllocateRemoteBuffer( ULONG bufSize, void **ppBuffer )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // The call to Append below will call CUnorderedArray, which will call unsafe New.
+ HRESULT hr;
+ CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(&hr, GetCanary());
+ if( FAILED(hr) )
+ {
+ return hr;
+ }
+
+ // Actually allocate the buffer
+ BYTE* pBuffer = new (interopsafe, nothrow) BYTE[bufSize];
+
+    LOG((LF_CORDB, LL_EVERYTHING, "D::ARB: new'd 0x%x\n", pBuffer));
+
+ // Check for out of memory error
+ if (pBuffer == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ // Track the allocation so we can free it later
+ void **ppNextBlob = GetMemBlobs()->Append();
+ if( ppNextBlob == NULL )
+ {
+ DeleteInteropSafe( pBuffer );
+ return E_OUTOFMEMORY;
+ }
+ *ppNextBlob = pBuffer;
+
+ // Return the allocated memory
+ *ppBuffer = pBuffer;
+ return S_OK;
+}
+
+/*
+ * Used to release a previously-requested buffer
+ *
+ * This is a synchronous event (reply required).
+ */
+HRESULT Debugger::SendReleaseBuffer(DebuggerRCThread* rcThread, void *pBuffer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO10000, "D::SRB for buffer 0x%x\n", pBuffer));
+
+ // This is a synchronous event (reply required)
+ DebuggerIPCEvent* event = rcThread->GetIPCEventReceiveBuffer();
+ PREFIX_ASSUME(event != NULL);
+ InitIPCEvent(event, DB_IPCE_RELEASE_BUFFER_RESULT, NULL, NULL);
+
+ _ASSERTE(pBuffer != NULL);
+
+ // Free the memory
+ ReleaseRemoteBuffer(pBuffer, true);
+
+ // Indicate success in reply
+ event->ReleaseBufferResult.hr = S_OK;
+
+ // Send the result
+ return rcThread->SendIPCReply();
+}
+
+
+//
+// Used to delete the buffer previously-requested by the right side.
+// We've factored the code since both the ~Debugger and SendReleaseBuffer
+// methods do this.
+//
+HRESULT Debugger::ReleaseRemoteBuffer(void *pBuffer, bool removeFromBlobList)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "D::RRB: Releasing RS-alloc'd buffer 0x%x\n", pBuffer));
+
+ // Remove the buffer from the blob list if necessary.
+ if (removeFromBlobList)
+ {
+ USHORT cBlobs = GetMemBlobs()->Count();
+ void **rgpBlobs = GetMemBlobs()->Table();
+
+ USHORT i;
+ for (i = 0; i < cBlobs; i++)
+ {
+ if (rgpBlobs[i] == pBuffer)
+ {
+ GetMemBlobs()->DeleteByIndex(i);
+ break;
+ }
+ }
+
+ // We should have found a match. All buffers passed to ReleaseRemoteBuffer
+ // should have been allocated with AllocateRemoteBuffer and not yet freed.
+ _ASSERTE( i < cBlobs );
+ }
+
+ // Delete the buffer. (Need cast for GCC template support)
+ DeleteInteropSafe( (BYTE*)pBuffer );
+
+ return S_OK;
+}
+
+//
+// UnrecoverableError causes the Left Side to enter a state where no more
+// debugging can occur and we leave around enough information for the
+// Right Side to tell what happened.
+//
+void Debugger::UnrecoverableError(HRESULT errorHR,
+ unsigned int errorCode,
+ const char *errorFile,
+ unsigned int errorLine,
+ bool exitThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10,
+ "Unrecoverable error: hr=0x%08x, code=%d, file=%s, line=%d\n",
+ errorHR, errorCode, errorFile, errorLine));
+
+ //
+ // Setting this will ensure that not much else happens...
+ //
+ m_unrecoverableError = TRUE;
+
+ //
+ // Fill out the control block with the error.
+ // in-proc will find out when the function fails
+ //
+ DebuggerIPCControlBlock *pDCB = m_pRCThread->GetDCB();
+
+ PREFIX_ASSUME(pDCB != NULL);
+
+ pDCB->m_errorHR = errorHR;
+ pDCB->m_errorCode = errorCode;
+
+ //
+ // If we're told to, exit the thread.
+ //
+ if (exitThread)
+ {
+ LOG((LF_CORDB, LL_INFO10,
+ "Thread exiting due to unrecoverable error.\n"));
+ ExitThread(errorHR);
+ }
+}
+
+//
+// Callback for IsThreadAtSafePlace's stack walk.
+//
+StackWalkAction Debugger::AtSafePlaceStackWalkCallback(CrawlFrame *pCF,
+ VOID* data)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(CheckPointer(pCF));
+ PRECONDITION(CheckPointer(data));
+ }
+ CONTRACTL_END;
+
+ bool *atSafePlace = (bool*)data;
+ LOG((LF_CORDB, LL_INFO100000, "D:AtSafePlaceStackWalkCallback\n"));
+
+ if (pCF->IsFrameless() && pCF->IsActiveFunc())
+ {
+ LOG((LF_CORDB, LL_INFO1000000, "D:AtSafePlaceStackWalkCallback, IsFrameLess() and IsActiveFunc()\n"));
+ if (g_pEEInterface->CrawlFrameIsGcSafe(pCF))
+ {
+ LOG((LF_CORDB, LL_INFO1000000, "D:AtSafePlaceStackWalkCallback - TRUE: CrawlFrameIsGcSafe()\n"));
+ *atSafePlace = true;
+ }
+ }
+ return SWA_ABORT;
+}
+
+//
+// Determine, via a quick one frame stack walk, if a given thread is
+// in a gc safe place.
+//
+bool Debugger::IsThreadAtSafePlaceWorker(Thread *thread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(CheckPointer(thread));
+ }
+ CONTRACTL_END;
+
+ bool atSafePlace = false;
+
+    // Set up our register display. If the thread has a filter context (it is stopped at an
+    // exception or hijack), use that; otherwise fall back to a zeroed context below.
+ REGDISPLAY rd;
+ CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread);
+
+ _ASSERTE(!(g_pEEInterface->GetThreadFilterContext(thread) && ISREDIRECTEDTHREAD(thread)));
+ if (context != NULL)
+ {
+ g_pEEInterface->InitRegDisplay(thread, &rd, context, TRUE);
+ }
+ else
+ {
+ CONTEXT ctx;
+ ZeroMemory(&rd, sizeof(rd));
+ ZeroMemory(&ctx, sizeof(ctx));
+#if defined(_TARGET_X86_)
+ rd.ControlPC = ctx.Eip;
+ rd.PCTAddr = (TADDR)&(ctx.Eip);
+#else
+ FillRegDisplay(&rd, &ctx);
+#endif
+
+ if (ISREDIRECTEDTHREAD(thread))
+ {
+ thread->GetFrame()->UpdateRegDisplay(&rd);
+ }
+ }
+
+ // Do the walk. If it fails, we don't care, because we default
+ // atSafePlace to false.
+ g_pEEInterface->StackWalkFramesEx(
+ thread,
+ &rd,
+ Debugger::AtSafePlaceStackWalkCallback,
+ (VOID*)(&atSafePlace),
+ QUICKUNWIND | HANDLESKIPPEDFRAMES |
+ DISABLE_MISSING_FRAME_DETECTION);
+
+#ifdef LOGGING
+ if (!atSafePlace)
+ LOG((LF_CORDB | LF_GC, LL_INFO1000,
+ "Thread 0x%x is not at a safe place.\n",
+ GetThreadIdHelper(thread)));
+#endif
+
+ return atSafePlace;
+}
+
+bool Debugger::IsThreadAtSafePlace(Thread *thread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(CheckPointer(thread));
+ }
+ CONTRACTL_END;
+
+
+ if (m_fShutdownMode)
+ {
+ return true;
+ }
+
+ // <TODO>
+ //
+ // Make sure this fix is evaluated when doing real work for debugging SO handling.
+ //
+ // On the Stack Overflow code path calling IsThreadAtSafePlaceWorker as it is
+ // currently implemented is way too stack intensive. For now we cheat and just
+ // say that if a thread is in the middle of handling a SO it is NOT at a safe
+ // place. This is a reasonably safe assumption to make and hopefully shouldn't
+ // result in deadlocking the debugger.
+ if ( (thread->IsExceptionInProgress()) &&
+ (g_pEEInterface->GetThreadException(thread) == CLRException::GetPreallocatedStackOverflowExceptionHandle()) )
+ {
+ return false;
+ }
+ // </TODO>
+ else
+ {
+ return IsThreadAtSafePlaceWorker(thread);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Get the complete user state flags.
+// This will collect flags both from the EE and from the LS.
+// This is the real implementation of the RS's ICorDebugThread::GetUserState().
+//
+// Parameters:
+// pThread - non-null thread to get state for.
+//
+// Returns: a CorDebugUserState flags enum describing state.
+//-----------------------------------------------------------------------------
+CorDebugUserState Debugger::GetFullUserState(Thread *pThread)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(pThread));
+ }
+ CONTRACTL_END;
+
+ CorDebugUserState state = g_pEEInterface->GetPartialUserState(pThread);
+
+ bool fSafe = IsThreadAtSafePlace(pThread);
+ if (!fSafe)
+ {
+ state = (CorDebugUserState) (state | USER_UNSAFE_POINT);
+ }
+
+ return state;
+}
+
+/******************************************************************************
+ *
+ * Helper for the debugger to get a unique thread ID.
+ * If we are not in fiber mode, we can safely use the OS thread ID;
+ * otherwise, we use our own unique ID.
+ *
+ * We return our unique ID when a host is hosting the Thread.
+ *
+ *
+ ******************************************************************************/
+DWORD Debugger::GetThreadIdHelper(Thread *pThread)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!CLRTaskHosted())
+ {
+ // use the plain old OS Thread ID
+ return pThread->GetOSThreadId();
+ }
+ else
+ {
+ // use our unique thread ID
+ return pThread->GetThreadId();
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Called by EnC during remapping to get information about the local vars.
+// EnC will then use this to set values in the new version to their corresponding
+// values from the old version.
+//
+// Returns a pointer to the debugger's copies of the maps. Caller
+// does not own the memory provided via vars outparameter.
+//-----------------------------------------------------------------------------
+void Debugger::GetVarInfo(MethodDesc * fd, // [IN] method of interest
+ void *DebuggerVersionToken, // [IN] which edit version
+ SIZE_T * cVars, // [OUT] size of 'vars'
+ const ICorDebugInfo::NativeVarInfo **vars // [OUT] map telling where local vars are stored
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ }
+ CONTRACTL_END;
+
+ DebuggerJitInfo * ji = (DebuggerJitInfo *)DebuggerVersionToken;
+
+ // If we didn't supply a DJI, then we're asking for the most recent version.
+ if (ji == NULL)
+ {
+ ji = GetLatestJitInfoFromMethodDesc(fd);
+ }
+    PREFIX_ASSUME(ji != NULL);
+
+    _ASSERTE(fd == ji->m_fd);
+
+ *vars = ji->GetVarNativeInfo();
+ *cVars = ji->GetVarNativeInfoCount();
+}
+
+#include "openum.h"
+
+#ifdef EnC_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+//
+// Apply an EnC edit to the CLR datastructures and send the result event to the
+// debugger right-side.
+//
+// Arguments:
+// pDebuggerModule - the module in which the edit should occur
+// cbMetadata - the number of bytes in pMetadata
+// pMetadata - pointer to the delta metadata
+// cbIL - the number of bytes in pIL
+// pIL - pointer to the delta IL
+//
+// Return Value:
+//
+// Assumptions:
+//
+// Notes:
+//
+// This is just the first half of processing an EnC request (hot swapping). This updates
+// the metadata and other CLR data structures to reflect the edit, but does not directly
+// affect code which is currently running. In order to achieve on-stack replacement
+// (remap of running code), we mine all old methods with "EnC remap breakpoints"
+// (instances of DebuggerEnCBreakpoint) at many sequence points. When one of those
+// breakpoints is hit, we give the debugger a RemapOpportunity event and give it a
+// chance to remap the execution to the new version of the method.
+//
+
+HRESULT Debugger::ApplyChangesAndSendResult(DebuggerModule * pDebuggerModule,
+ DWORD cbMetadata,
+ BYTE *pMetadata,
+ DWORD cbIL,
+ BYTE *pIL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // @todo - if EnC never works w/ interop, caller New on the helper thread may be ok.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ HRESULT hr = S_OK;
+
+ LOG((LF_ENC, LL_INFO100, "Debugger::ApplyChangesAndSendResult\n"));
+
+ Module *pModule = pDebuggerModule->GetRuntimeModule();
+ if (! pModule->IsEditAndContinueEnabled())
+ {
+ hr = CORDBG_E_ENC_MODULE_NOT_ENC_ENABLED;
+ }
+ else
+ {
+ // Violation with the following call stack:
+ // CONTRACT in MethodTableBuilder::InitMethodDesc
+ // CONTRACT in EEClass::AddMethod
+ // CONTRACT in EditAndContinueModule::AddMethod
+ // CONTRACT in EditAndContinueModule::ApplyEditAndContinue
+ // CONTRACT in EEDbgInterfaceImpl::EnCApplyChanges
+ // VIOLATED--> CONTRACT in Debugger::ApplyChangesAndSendResult
+ CONTRACT_VIOLATION(GCViolation);
+
+ // Tell the VM to apply the edit
+ hr = g_pEEInterface->EnCApplyChanges(
+ (EditAndContinueModule*)pModule, cbMetadata, pMetadata, cbIL, pIL);
+ }
+
+ LOG((LF_ENC, LL_INFO100, "Debugger::ApplyChangesAndSendResult 2\n"));
+
+ DebuggerIPCEvent* event = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(event,
+ DB_IPCE_APPLY_CHANGES_RESULT,
+ NULL,
+ NULL);
+
+ event->ApplyChangesResult.hr = hr;
+
+ // Send the result
+ return m_pRCThread->SendIPCEvent();
+}
+
+//
+// This structure is used to hold a list of the sequence points in a function and
+// determine which should have remap breakpoints applied to them for EnC
+//
+class EnCSequencePointHelper
+{
+public:
+ // Calculates remap info given the supplied JitInfo
+ EnCSequencePointHelper(DebuggerJitInfo *pJitInfo);
+ ~EnCSequencePointHelper();
+
+    // Returns true if the specified sequence point (given by its index in the
+    // sequence point table in the JitInfo) should get an EnC remap breakpoint.
+ BOOL ShouldSetRemapBreakpoint(unsigned int offsetIndex);
+
+private:
+ DebuggerJitInfo *m_pJitInfo;
+
+ DebugOffsetToHandlerInfo *m_pOffsetToHandlerInfo;
+};
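+
+// Typical usage (see Debugger::UpdateFunction below): construct the helper from the old
+// method's DebuggerJitInfo, then for each sequence point index i call
+// ShouldSetRemapBreakpoint(i) before planting a REMAP_PENDING DebuggerEnCBreakpoint at
+// that native offset.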
+
+//
+// Goes through the list of sequence points for a function and determines whether or not each
+// is a valid remap breakpoint location (not at a special offset, evaluation stack empty, and not inside a filter or handler).
+//
+EnCSequencePointHelper::EnCSequencePointHelper(DebuggerJitInfo *pJitInfo)
+    : m_pJitInfo(pJitInfo),
+      m_pOffsetToHandlerInfo(NULL)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_pJitInfo->GetSequenceMapCount() == 0)
+ {
+ return;
+ }
+
+ // Construct a list of native offsets we may want to place EnC breakpoints at
+ m_pOffsetToHandlerInfo = new DebugOffsetToHandlerInfo[m_pJitInfo->GetSequenceMapCount()];
+ for (unsigned int i = 0; i < m_pJitInfo->GetSequenceMapCount(); i++)
+ {
+ // By default this slot is unused. We want the indexes in m_pOffsetToHandlerInfo
+        // to correspond to the indexes of m_pJitInfo->GetSequenceMap(), so we rely
+        // on a -1 offset to indicate that a DebugOffsetToHandlerInfo entry is unused.
+ // However, it would be cleaner and permit a simpler API to the EE if we just
+ // had an array mapping the offsets instead.
+ m_pOffsetToHandlerInfo[i].offset = (SIZE_T) -1;
+ m_pOffsetToHandlerInfo[i].isInFilterOrHandler = FALSE;
+
+ SIZE_T offset = m_pJitInfo->GetSequenceMap()[i].nativeStartOffset;
+
+        // Check if this is a "special" IL offset, such as representing the prolog or epilog,
+ // or other region not directly mapped to native code.
+ if (DbgIsSpecialILOffset(pJitInfo->GetSequenceMap()[i].ilOffset))
+ {
+ LOG((LF_ENC, LL_INFO10000,
+ "D::UF: not placing E&C breakpoint at special offset 0x%x (IL: 0x%x)\n",
+ offset, m_pJitInfo->GetSequenceMap()[i].ilOffset));
+ continue;
+ }
+
+ // Skip duplicate sequence points
+ if (i >=1 && offset == pJitInfo->GetSequenceMap()[i-1].nativeStartOffset)
+ {
+ LOG((LF_ENC, LL_INFO10000,
+ "D::UF: not placing redundant E&C "
+ "breakpoint at duplicate offset 0x%x (IL: 0x%x)\n",
+ offset, m_pJitInfo->GetSequenceMap()[i].ilOffset));
+ continue;
+ }
+
+ // Skip sequence points that aren't due to the evaluation stack being empty
+ // We can only remap at stack-empty points (since we don't have a mapping for
+ // contents of the evaluation stack).
+ if (!(pJitInfo->GetSequenceMap()[i].source & ICorDebugInfo::STACK_EMPTY))
+ {
+ LOG((LF_ENC, LL_INFO10000,
+ "D::UF: not placing E&C breakpoint at offset "
+ "0x%x (IL: 0x%x) b/c not STACK_EMPTY:it's 0x%x\n", offset,
+ m_pJitInfo->GetSequenceMap()[i].ilOffset, pJitInfo->GetSequenceMap()[i].source));
+ continue;
+ }
+
+        // So far this sequence point looks good, so store its native offset so we can get
+ // EH information about it from the EE.
+ LOG((LF_ENC, LL_INFO10000,
+ "D::UF: possibly placing E&C breakpoint at offset "
+ "0x%x (IL: 0x%x)\n", offset, m_pJitInfo->GetSequenceMap()[i].ilOffset));
+ m_pOffsetToHandlerInfo[i].offset = m_pJitInfo->GetSequenceMap()[i].nativeStartOffset;
+
+ }
+
+ // Ask the EE to fill in the isInFilterOrHandler bit for the native offsets we're interested in
+ g_pEEInterface->DetermineIfOffsetsInFilterOrHandler(
+ (BYTE *)pJitInfo->m_addrOfCode, m_pOffsetToHandlerInfo, m_pJitInfo->GetSequenceMapCount());
+}
+
+EnCSequencePointHelper::~EnCSequencePointHelper()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_pOffsetToHandlerInfo)
+ {
+        delete [] m_pOffsetToHandlerInfo;
+ }
+}
+
+//
+// Returns whether we should set a remap breakpoint at a given offset. We only set them where the
+// evaluation stack is empty and not when inside a handler (finally, filter, or catch).
+//
+BOOL EnCSequencePointHelper::ShouldSetRemapBreakpoint(unsigned int offsetIndex)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ {
+ // GetSequenceMapCount calls LazyInitBounds() which can eventually
+ // call ExecutionManager::IncrementReader
+ CONTRACT_VIOLATION(TakesLockViolation);
+ _ASSERTE(offsetIndex <= m_pJitInfo->GetSequenceMapCount());
+ }
+
+ // If this slot is unused (offset -1), we excluded it early
+ if (m_pOffsetToHandlerInfo[offsetIndex].offset == (SIZE_T) -1)
+ {
+ return FALSE;
+ }
+
+ // Otherwise, check the isInFilterOrHandler bit
+ if (m_pOffsetToHandlerInfo[offsetIndex].isInFilterOrHandler)
+ {
+ LOG((LF_ENC, LL_INFO10000,
+ "D::UF: not placing E&C breakpoint in filter/handler at offset 0x%x\n",
+ m_pOffsetToHandlerInfo[offsetIndex].offset));
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+
+//-----------------------------------------------------------------------------
+// For each function that's EnC-ed, the EE will call either UpdateFunction
+// (if the function is already loaded + jitted) or AddFunction.
+//
+// This is called before the EE updates the MethodDesc, so pMD does not yet
+// point to the version we'll be remapping to.
+//-----------------------------------------------------------------------------
+HRESULT Debugger::UpdateFunction(MethodDesc* pMD, SIZE_T encVersion)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ PRECONDITION(ThisIsHelperThread()); // guarantees we're serialized.
+ PRECONDITION(IsStopped());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::UF: updating "
+ "%s::%s to version %d\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, encVersion));
+
+ // tell the RS that this function has been updated so that it can create new CorDBFunction
+ Module *pModule = g_pEEInterface->MethodDescGetModule(pMD);
+ _ASSERTE(pModule != NULL);
+ mdToken methodDef = pMD->GetMemberDef();
+ SendEnCUpdateEvent(DB_IPCE_ENC_UPDATE_FUNCTION,
+ pModule,
+ methodDef,
+ pMD->GetMethodTable()->GetCl(),
+ encVersion);
+
+ DebuggerMethodInfo *dmi = GetOrCreateMethodInfo(pModule, methodDef);
+ if (dmi == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ // The DMI always holds the most current EnC version number. We always JIT the most
+ // current version of the function, so when we do see a JitBegin we will create a new
+ // dji for it and stash the current version there. We don't want to change the current
+ // jit info because it has to maintain the version for the code it corresponds to.
+ dmi->SetCurrentEnCVersion(encVersion);
+
+ // This is called before the MethodDesc is updated to point to the new function.
+ // So this call will get the most recent old function.
+ DebuggerJitInfo *pJitInfo = GetLatestJitInfoFromMethodDesc(pMD);
+
+ if (pJitInfo == NULL )
+ {
+        LOG((LF_CORDB,LL_INFO10000,"D::UF: Unable to get DJI for the most recently "
+            "JITted version (it hasn't been jitted yet), "
+            "which is fine\n"));
+ return S_OK;
+ }
+
+ //
+ // Mine the old version of the method with patches so that we can provide
+ // remap opportunities whenever the old version of the method is executed.
+ //
+
+ if (pJitInfo->m_encBreakpointsApplied)
+ {
+ LOG((LF_CORDB,LL_INFO10000,"D::UF: Breakpoints already applied\n"));
+ return S_OK;
+ }
+
+ LOG((LF_CORDB,LL_INFO10000,"D::UF: Applying breakpoints\n"));
+
+    // We only place the patches if we have jit info for this
+    // function, i.e., it's already been jitted. Otherwise, the EE will
+    // pick up the new method on the next JIT anyway.
+
+ ICorDebugInfo::SourceTypes src;
+
+ EnCSequencePointHelper sequencePointHelper(pJitInfo);
+
+ // For each offset in the IL->Native map, set a new EnC breakpoint on the
+ // ones that we know could be remap points.
+ for (unsigned int i = 0; i < pJitInfo->GetSequenceMapCount(); i++)
+ {
+ // Skip if this isn't a valid remap point (eg. is in an exception handler)
+ if (! sequencePointHelper.ShouldSetRemapBreakpoint(i))
+ {
+ continue;
+ }
+
+ SIZE_T offset = pJitInfo->GetSequenceMap()[i].nativeStartOffset;
+
+ LOG((LF_CORDB, LL_INFO10000,
+ "D::UF: placing E&C breakpoint at native offset 0x%x\n",
+ offset));
+
+ DebuggerEnCBreakpoint *bp;
+
+ // Create and activate a new EnC remap breakpoint here in the old version of the method
+ bp = new (interopsafe) DebuggerEnCBreakpoint( offset,
+ pJitInfo,
+ DebuggerEnCBreakpoint::REMAP_PENDING,
+ (AppDomain *)pModule->GetDomain());
+
+ _ASSERTE(bp != NULL);
+ }
+
+ pJitInfo->m_encBreakpointsApplied = true;
+
+ return S_OK;
+}
+
+// Called to update a function that hasn't yet been loaded (and so we don't have a MethodDesc).
+// This may be updating an existing function on a type that hasn't been loaded
+// or adding a new function to a type that hasn't been loaded.
+// We need to notify the debugger so that it can properly track version info.
+HRESULT Debugger::UpdateNotYetLoadedFunction(mdMethodDef token, Module * pModule, SIZE_T encVersion)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+
+ PRECONDITION(ThisIsHelperThread());
+ PRECONDITION(ThreadHoldsLock()); // must have lock since we're on helper and stopped.
+ }
+ CONTRACTL_END;
+
+ DebuggerMethodInfo *dmi = GetOrCreateMethodInfo(pModule, token);
+ if (! dmi)
+ {
+ return E_OUTOFMEMORY;
+ }
+ dmi->SetCurrentEnCVersion(encVersion);
+
+
+ // Must tell the RS that this function has been added so that it can create new CorDBFunction.
+ mdTypeDef classToken = 0;
+
+ HRESULT hr = pModule->GetMDImport()->GetParentToken(token, &classToken);
+ if (FAILED(hr))
+ {
+ // We never expect this to actually fail, but just in case it does for some other crazy reason,
+ // we'll return before we AV.
+ CONSISTENCY_CHECK_MSGF(false, ("Class lookup failed:mdToken:0x%08x, pModule=%p. hr=0x%08x\n", token, pModule, hr));
+ return hr;
+ }
+
+ SendEnCUpdateEvent(DB_IPCE_ENC_ADD_FUNCTION, pModule, token, classToken, encVersion);
+
+
+ return S_OK;
+}
+
+// Called to add a new function when the type has been loaded already.
+// This is effectively the same as above, except that we're given a
+// MethodDesc instead of a module and token.
+// This should probably be merged into a single method since the caller
+// should always have a module and token available in both cases.
+HRESULT Debugger::AddFunction(MethodDesc* pMD, SIZE_T encVersion)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+
+ PRECONDITION(ThisIsHelperThread());
+ PRECONDITION(ThreadHoldsLock()); // must have lock since we're on helper and stopped.
+ }
+ CONTRACTL_END;
+
+ DebuggerDataLockHolder debuggerDataLockHolder(this);
+
+ LOG((LF_CORDB, LL_INFO10000, "D::AF: adding "
+ "%s::%s to version %d\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName, encVersion));
+
+ _ASSERTE(pMD != NULL);
+ Module *pModule = g_pEEInterface->MethodDescGetModule(pMD);
+ _ASSERTE(pModule != NULL);
+ mdToken methodDef = pMD->GetMemberDef();
+
+ // tell the RS that this function has been added so that it can create new CorDBFunction
+ SendEnCUpdateEvent( DB_IPCE_ENC_ADD_FUNCTION,
+ pModule,
+ methodDef,
+ pMD->GetMethodTable()->GetCl(),
+ encVersion);
+
+ DebuggerMethodInfo *dmi = CreateMethodInfo(pModule, methodDef);
+ if (! dmi)
+ {
+ return E_OUTOFMEMORY;
+ }
+ dmi->SetCurrentEnCVersion(encVersion);
+
+ return S_OK;
+}
+
+// Invoke when a field is added to a class using EnC
+HRESULT Debugger::AddField(FieldDesc* pFD, SIZE_T encVersion)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::AFld: adding "
+ "%8.8d::%8.8d to version %d\n", pFD->GetApproxEnclosingMethodTable()->GetCl(), pFD->GetMemberDef(), encVersion));
+
+    // tell the RS that this field has been added so that it can update its structures
+ SendEnCUpdateEvent( DB_IPCE_ENC_ADD_FIELD,
+ pFD->GetModule(),
+ pFD->GetMemberDef(),
+ pFD->GetApproxEnclosingMethodTable()->GetCl(),
+ encVersion);
+
+ return S_OK;
+}
+
+//
+// RemapComplete is called when we are just about to resume into
+// the function so that we can set up our breakpoint to trigger
+// a call to the RemapComplete callback once the function is actually
+// on the stack. We need to wait until the function is jitted before
+// we can add the trigger, which doesn't happen until we call
+// ResumeInUpdatedFunction in the VM.
+//
+// addr is an address within the given function, which we use to determine
+// the exact EnC version.
+//
+HRESULT Debugger::RemapComplete(MethodDesc* pMD, TADDR addr, SIZE_T nativeOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pMD != NULL);
+ _ASSERTE(addr != NULL);
+
+    LOG((LF_CORDB, LL_INFO10000, "D::RC: installed remap complete patch for "
+        "%s::%s\n", pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
+
+ DebuggerMethodInfo *dmi = GetOrCreateMethodInfo(pMD->GetModule(), pMD->GetMemberDef());
+
+ if (dmi == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ DebuggerJitInfo *pJitInfo = GetJitInfo(pMD, (const BYTE *) addr);
+
+ if (pJitInfo == NULL)
+ {
+ _ASSERTE(!"Debugger doesn't handle OOM");
+ return E_OUTOFMEMORY;
+ }
+ _ASSERTE(pJitInfo->m_addrOfCode + nativeOffset == addr);
+
+ DebuggerEnCBreakpoint *bp;
+
+ // Create and activate a new REMAP_COMPLETE EnC breakpoint to let us know when
+ // the EE has completed the remap process.
+ // This will be deleted when the patch is hit.
+ bp = new (interopsafe, nothrow) DebuggerEnCBreakpoint( nativeOffset,
+ pJitInfo,
+ DebuggerEnCBreakpoint::REMAP_COMPLETE,
+ (AppDomain *)pMD->GetModule()->GetDomain());
+ if (bp == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ return S_OK;
+}
+
+//-----------------------------------------------------------------------------
+// Called by EnC stuff to map an IL offset to a native offset for the given
+// method described by (pMD, nativeFnxStart).
+//
+// pMD - methoddesc for method being remapped
+// ilOffset - incoming offset in old method to remap.
+// nativeFnxStart - address of new function. This can be used to find the DJI
+// for the new method.
+// nativeOffset - outparameter for native linear offset relative to start address.
+//-----------------------------------------------------------------------------
+
+HRESULT Debugger::MapILInfoToCurrentNative(MethodDesc *pMD,
+ SIZE_T ilOffset,
+ TADDR nativeFnxStart,
+ SIZE_T *nativeOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ PRECONDITION(nativeOffset != NULL);
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(nativeFnxStart != NULL);
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(HasLazyData()); // only used for EnC, should have already inited.
+
+
+    LOG((LF_CORDB, LL_INFO1000000, "D::MILITCN: %s::%s ilOff:0x%x, "
+        "natFnx:0x%x\n", pMD->m_pszDebugClassName,
+        pMD->m_pszDebugMethodName, ilOffset, nativeFnxStart));
+
+ *nativeOffset = 0;
+ DebuggerJitInfo *djiTo = GetJitInfo( pMD, (const BYTE *)nativeFnxStart);
+ if (djiTo == NULL)
+ {
+ _ASSERTE(!"No DJI in EnC case: should only happen on oom. Debugger doesn't support OOM.");
+ return E_FAIL;
+ }
+
+ DebuggerJitInfo::ILToNativeOffsetIterator it;
+ djiTo->InitILToNativeOffsetIterator(it, ilOffset);
+ *nativeOffset = it.CurrentAssertOnlyOne(NULL);
+ return S_OK;
+}
+
+#endif // EnC_SUPPORTED
+
+//---------------------------------------------------------------------------------------
+// Hijack worker stub called from asm stub. This can then delegate to other hijacks.
+//
+// Arguments:
+// pContext - context from which we were hijacked. Always non-null.
+// pRecord - exception record if hijacked from an exception event.
+// Else null (if hijacked from a managed IP).
+// reason - hijack reason. Use this to delegate to the proper hijack stub.
+// pData - arbitrary data for the hijack to use. (eg, such as a DebuggerEval object)
+//
+// Returns:
+//     This does not return. Instead it restores this thread's context to pContext.
+//
+// Assumptions:
+// If hijacked at an exception event, the debugger must have cleared the exception.
+//
+// Notes:
+// The debugger hijacked the thread to get us here via the DacDbi Hijack primitive.
+// This is called from a hand coded asm stub.
+//
+void STDCALL ExceptionHijackWorker(
+ CONTEXT * pContext,
+ EXCEPTION_RECORD * pRecord,
+ EHijackReason::EHijackReason reason,
+ void * pData)
+{
+ STRESS_LOG0(LF_CORDB,LL_INFO100, "D::EHW: Enter ExceptionHijackWorker\n");
+
+ // We could have many different reasons for hijacking. Switch and invoke the proper hijacker.
+ switch(reason)
+ {
+ case EHijackReason::kUnhandledException:
+ STRESS_LOG0(LF_CORDB,LL_INFO10, "D::EHW: Calling g_pDebugger->UnhandledHijackWorker()\n");
+ _ASSERTE(pData == NULL);
+ g_pDebugger->UnhandledHijackWorker(pContext, pRecord);
+ break;
+#ifdef FEATURE_INTEROP_DEBUGGING
+ case EHijackReason::kM2UHandoff:
+ _ASSERTE(pData == NULL);
+ g_pDebugger->M2UHandoffHijackWorker(pContext, pRecord);
+ break;
+ case EHijackReason::kFirstChanceSuspend:
+ _ASSERTE(pData == NULL);
+ g_pDebugger->FirstChanceSuspendHijackWorker(pContext, pRecord);
+ break;
+ case EHijackReason::kGenericHijack:
+ _ASSERTE(pData == NULL);
+ g_pDebugger->GenericHijackFunc();
+ break;
+#endif
+ default:
+ CONSISTENCY_CHECK_MSGF(false, ("Unrecognized Hijack code: %d", reason));
+ }
+
+ // Currently, no Hijack actually returns yet.
+ UNREACHABLE();
+
+ // If we return to this point, then we'll restore ourselves.
+ // We've got the context that we were hijacked from, so we should be able to just
+ // call SetThreadContext on ourself to fix us.
+}
+
+#if defined(WIN64EXCEPTIONS)
+
+#if defined(_TARGET_AMD64_)
+// ----------------------------------------------------------------------------
+// EmptyPersonalityRoutine
+//
+// Description:
+// This personality routine is used to work around a limitation of the OS unwinder when we return
+// ExceptionCollidedUnwind.
+// See code:ExceptionHijackPersonalityRoutine for more information.
+//
+// Arguments:
+//    * pExceptionRecord - not used
+//    * MemoryStackFp - not used
+//    * pContextRecord - not used
+//    * pDispatcherContext - not used
+//
+// Return Value:
+// Always return ExceptionContinueSearch.
+//
+
+EXCEPTION_DISPOSITION EmptyPersonalityRoutine(IN PEXCEPTION_RECORD pExceptionRecord,
+ IN ULONG64 MemoryStackFp,
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext)
+{
+ LIMITED_METHOD_CONTRACT;
+ return ExceptionContinueSearch;
+}
+#endif // _TARGET_AMD64_
+
+//---------------------------------------------------------------------------------------
+// Personality routine for unwinding the assembly hijack stub on 64-bit.
+//
+// Arguments:
+// standard Personality routine signature.
+//
+// Assumptions:
+//    This is called by the OS exception logic during exception handling.
+//
+// Notes:
+// We just need 1 personality routine for the tiny assembly hijack stub.
+// All the C++ code invoked by the stub is ok.
+//
+// This needs to fetch the original context that this thread was hijacked from
+// (which the hijack pushed onto the stack) and pass that back to the OS. This lets
+//    the OS unwind out of the hijack.
+//
+// This function should only be executed if an unhandled exception is intercepted by a managed debugger.
+// Otherwise there should never be a 2nd pass exception dispatch crossing the hijack stub.
+//
+// The basic idea here is straightforward. The OS does an exception dispatch and hit our hijack stub.
+// Since the hijack stub is not unwindable, we need a personality routine to restore the CONTEXT and
+// tell the OS to continue the dispatch with that CONTEXT by returning ExceptionCollidedUnwind.
+//
+// However, empirically, the OS expects that when we return ExceptionCollidedUnwind, the function
+// represented by the CONTEXT has a personality routine. The OS will actually AV if we return a NULL
+// personality routine.
+//
+// On AMD64, we work around this by using an empty personality routine.
+
+EXTERN_C EXCEPTION_DISPOSITION
+ExceptionHijackPersonalityRoutine(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG32 MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+#if defined(_TARGET_AMD64_)
+ CONTEXT * pHijackContext = NULL;
+
+ // Get the 1st parameter (the Context) from hijack worker.
+ pHijackContext = *reinterpret_cast<CONTEXT **>(pDispatcherContext->EstablisherFrame);
+
+ // This copies pHijackContext into pDispatcherContext, which the OS can then
+ // use to walk the stack.
+ FixupDispatcherContext(pDispatcherContext, pHijackContext, pContextRecord, (PEXCEPTION_ROUTINE)EmptyPersonalityRoutine);
+#else
+ _ASSERTE(!"NYI - ExceptionHijackPersonalityRoutine()");
+#endif
+
+ // Returning ExceptionCollidedUnwind will cause the OS to take our new context record and
+ // dispatcher context and restart the exception dispatching on this call frame, which is
+ // exactly the behavior we want.
+ return ExceptionCollidedUnwind;
+}
+#endif // WIN64EXCEPTIONS
+
+
+// UEF Prototype from excep.cpp
+LONG InternalUnhandledExceptionFilter_Worker(EXCEPTION_POINTERS *pExceptionInfo);
+
+//---------------------------------------------------------------------------------------
+// Hijack for a 2nd-chance exception. Will invoke the CLR's UEF.
+//
+// Arguments:
+// pContext - context that this thread was hijacked from.
+// pRecord - exception record of the exception that this was hijacked at.
+// pData - random data.
+// Notes:
+//    When under a native debugger, the OS does not invoke the Unhandled Exception Filter (UEF).
+// It dispatches a 2nd-chance Exception event instead.
+// However, the CLR's UEF does lots of useful work (like dispatching the 2nd-chance managed exception,
+// allowing func-eval on 2nd-chance, and allowing intercepting unhandled exceptions).
+// So we'll emulate the OS behavior here by invoking the CLR's UEF directly.
+//
+void Debugger::UnhandledHijackWorker(CONTEXT * pContext, EXCEPTION_RECORD * pRecord)
+{
+ CONTRACTL
+ {
+ // The ultimate protection shield is that this hijack can be executed under the same circumstances
+ // as a top-level UEF that pinvokes into managed code
+ // - That means we're GC-triggers safe
+ // - that means that we can crawl the stack. (1st-pass EH logic ensures this).
+ // We need to be GC-triggers because this may invoke a func-eval.
+ GC_TRIGGERS;
+
+ // Don't throw out of a hijack! There's nobody left to catch this.
+ NOTHROW;
+
+ // We expect to always be in preemptive here by the time we get this unhandled notification.
+ // We know this is true because a native UEF is preemptive.
+ // More detail:
+ // 1) If we got here from a software exception (eg, Throw from C#), then the jit helper
+ // toggled us to preemptive before calling RaiseException().
+ // 2) If we got here from a hardware exception in managed code, then the 1st-pass already did
+ // some magic to get us into preemptive. On x86, this is magic. On 64-bit, it did some magic
+ // to push a Faulting-Exception-Frame and rethrow the exception as a software exception.
+ MODE_PREEMPTIVE;
+
+
+ PRECONDITION(CheckPointer(pContext));
+ PRECONDITION(CheckPointer(pRecord));
+ }
+ CONTRACTL_END;
+
+ EXCEPTION_POINTERS exceptionInfo;
+ exceptionInfo.ContextRecord = pContext;
+ exceptionInfo.ExceptionRecord = pRecord;
+
+ // Snag the Runtime thread. Since we're hijacking a managed exception, we should always have one.
+ Thread * pThread = g_pEEInterface->GetThread();
+ (void)pThread; //prevent "unused variable" error from GCC
+ _ASSERTE(pThread != NULL);
+
+ BOOL fSOException = FALSE;
+
+ if ((pRecord != NULL) &&
+ (pRecord->ExceptionCode == STATUS_STACK_OVERFLOW))
+ {
+ fSOException = TRUE;
+ }
+
+    // Because we hijack here during a JIT attach invoked by the OS, we need to make sure that the debugger is
+    // completely attached before continuing. If we are ever hijacked here when an attach is not in progress, this
+    // function returns immediately, so that case is harmless.
+ WaitForDebuggerAttach();
+ PostJitAttach();
+
+    // On Win7 WatsonLastChance returns CONTINUE_SEARCH for unhandled exceptions except stack overflow, and
+    // lets the OS launch debuggers for us. Before the unhandled exception reaches the OS, the CLR UEF has already
+    // processed it. Thus, we should not call into the CLR UEF again in that case.
+ if (RunningOnWin7() &&
+ pThread &&
+ (pThread->HasThreadStateNC(Thread::TSNC_ProcessedUnhandledException) ||
+ pThread->HasThreadStateNC(Thread::TSNC_AppDomainContainUnhandled) ||
+ fSOException))
+ {
+
+ FrameWithCookie<FaultingExceptionFrame> fef;
+#if defined(WIN64EXCEPTIONS)
+ *((&fef)->GetGSCookiePtr()) = GetProcessGSCookie();
+#endif // WIN64EXCEPTIONS
+ if ((pContext != NULL) && fSOException)
+ {
+ GCX_COOP(); // Must be cooperative to modify frame chain.
+
+ // EEPolicy::HandleFatalStackOverflow pushes a FaultingExceptionFrame on the stack after SO
+ // exception. Our hijack code runs in the exception context, and overwrites the stack space
+            // after the SO exception, so this frame was popped out before invoking RaiseFailFast. We need to
+ // put it back here for running func-eval code.
+ // This cumbersome code should be removed once SO synchronization is moved to be completely
+ // out-of-process.
+ fef.InitAndLink(pContext);
+ }
+
+ STRESS_LOG0(LF_CORDB, LL_INFO10, "D::EHW: Calling NotifyDebuggerLastChance\n");
+ NotifyDebuggerLastChance(pThread, &exceptionInfo, TRUE);
+
+ // Continuing from a second chance managed exception causes the process to exit.
+ TerminateProcess(GetCurrentProcess(), 0);
+ }
+
+ // Since this is a unhandled managed exception:
+ // - we always have a Thread* object.
+ // - we always have a throwable
+ // - we executed through the 1st-pass of the EH logic. This means the 1st-pass could do work
+ // to enforce certain invariants (like the ones listed here, or ensuring the thread can be crawled)
+
+ // Need to call the CLR's UEF. This will do all the key work including:
+ // - send the managed 2nd-chance exception event.
+ // - deal with synchronization.
+ // - allow func-evals.
+ // - deal with interception.
+
+ // If intercepted, then this never returns. It will manually invoke the unwinders and fix the context.
+
+ // InternalUnhandledExceptionFilter_Worker has a throws contract, but should not be throwing in any
+ // conditions we care about. This hijack should never throw, so catch everything.
+ HRESULT hrIgnore;
+ EX_TRY
+ {
+ InternalUnhandledExceptionFilter_Worker(&exceptionInfo);
+ }
+ EX_CATCH_HRESULT(hrIgnore);
+
+ // Continuing from a second chance managed exception causes the process to exit.
+ TerminateProcess(GetCurrentProcess(), 0);
+}
+
+#ifdef FEATURE_INTEROP_DEBUGGING
+//
+// This is the handler function that is put in place of a thread's top-most SEH handler function when it is hijacked by
+// the Right Side during an unmanaged first chance exception.
+//
+typedef EXCEPTION_DISPOSITION (__cdecl *SEHHandler)(EXCEPTION_RECORD *pExceptionRecord,
+ EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
+ CONTEXT *pContext,
+ void *DispatcherContext);
+#define DOSPEW 0
+
+#if DOSPEW
+#define SPEW(s) s
+#else
+#define SPEW(s)
+#endif
+
+
+
+
+//-----------------------------------------------------------------------------
+// Hijack when we have a M2U handoff.
+// This happens when we do a step-out from Managed-->Unmanaged, and so we hit a managed patch in Native code.
+// This also happens when a managed stepper does a step-in to unmanaged code.
+// Since we're in native code, there's no CPFH, and so we have to hijack.
+// @todo- could this be removed? Step-out to native is illegal in v2.0, and do existing
+// CLR filters catch the step-in patch?
+// @dbgtodo controller/stepping - this will be completely unneeded in V3 when all stepping is oop
+//-----------------------------------------------------------------------------
+VOID Debugger::M2UHandoffHijackWorker(CONTEXT *pContext,
+ EXCEPTION_RECORD *pExceptionRecord)
+{
+ // We must use a static contract here because the function does not return normally
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS; // from sending managed event
+    STATIC_CONTRACT_MODE_PREEMPTIVE; // we're in unmanaged code.
+ SO_NOT_MAINLINE_FUNCTION;
+
+
+ LOG((LF_CORDB, LL_INFO1000, "D::M2UHHW: Context=0x%p exception record=0x%p\n",
+ pContext, pExceptionRecord));
+
+ // We should only be here for a BP
+ _ASSERTE(pExceptionRecord->ExceptionCode == STATUS_BREAKPOINT);
+
+ // Get the current runtime thread. This is only an optimized TLS access.
+ // Since we're coming off a managed-step, we should always have a thread.
+ Thread *pEEThread = g_pEEInterface->GetThread();
+ _ASSERTE(pEEThread != NULL);
+
+ _ASSERTE(!pEEThread->GetInteropDebuggingHijacked());
+ pEEThread->SetInteropDebuggingHijacked(TRUE);
+
+    // Win32 has a weird property where EIP points after the BP in the debug event,
+    // so we adjust it to point at the BP.
+ CORDbgAdjustPCForBreakInstruction((DT_CONTEXT*)pContext);
+ LOG((LF_CORDB, LL_INFO1000, "D::M2UHHW: Context ip set to 0x%p\n", GetIP(pContext)));
+
+ _ASSERTE(!ISREDIRECTEDTHREAD(pEEThread));
+
+ // Don't bother setting FilterContext here because we already pass it to FirstChanceNativeException.
+ // Shortcut right to our dispatch native exception logic, there may be no COMPlusFrameHandler in place!
+ EX_TRY
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::M2UHHW: Calling FirstChanceNativeException\n"));
+ bool okay;
+ okay = g_pDebugger->FirstChanceNativeException(pExceptionRecord,
+ pContext,
+ pExceptionRecord->ExceptionCode,
+ pEEThread);
+ _ASSERTE(okay == true);
+ LOG((LF_CORDB, LL_INFO1000, "D::M2UHHW: FirstChanceNativeException returned\n"));
+ }
+ EX_CATCH
+ {
+ // It would be really bad if somebody threw here. We're actually outside of managed code,
+ // so there's not a lot we can do besides just swallow the exception and hope for the best.
+ LOG((LF_CORDB, LL_INFO1000, "D::M2UHHW: ERROR! FirstChanceNativeException threw an exception\n"));
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ _ASSERTE(!ISREDIRECTEDTHREAD(pEEThread));
+ _ASSERTE(pEEThread->GetInteropDebuggingHijacked());
+ pEEThread->SetInteropDebuggingHijacked(FALSE);
+
+ // This signal will be received by the RS and it will use SetThreadContext
+ // to clear away the entire hijack frame. This function does not return.
+ LOG((LF_CORDB, LL_INFO1000, "D::M2UHHW: Flaring hijack complete\n"));
+ SignalHijackComplete();
+
+ _ASSERTE(!"UNREACHABLE");
+}
+
+//-----------------------------------------------------------------------------
+// This hijack is run after receiving an in-band (IB) event when we don't know how the
+// debugger will want to continue it. Under the covers we clear the event and divert
+// execution here where we block until the debugger decides whether or not to clear
+// the event. At that point we exit this hijack and the LS diverts execution back
+// to the offending instruction.
+// We don't know:
+// - whether we have an EE-thread?
+// - how we're going to continue this (handled / not-handled).
+//
+// But we do know that:
+// - this exception does not belong to the CLR.
+// - this thread is not in cooperative mode.
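+//
+// The handshake with the RS, as implemented below, is roughly:
+//   1. stash a pointer to a DebuggerIPCFirstChanceData where the RS can find it (the
+//      thread's debugger word, or the EEThreadPtr slot when there is no EE Thread),
+//   2. SignalHijackStarted() and look at the action the RS wrote back,
+//   3. if the action is HIJACK_ACTION_WAIT, block on m_leftSideUnmanagedWaitEvent until
+//      the debugger decides how to continue,
+//   4. SignalHijackComplete(), clean up, and return CONTINUE_EXECUTION or CONTINUE_SEARCH
+//      based on the final action.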
+//-----------------------------------------------------------------------------
+LONG Debugger::FirstChanceSuspendHijackWorker(CONTEXT *pContext,
+ EXCEPTION_RECORD *pExceptionRecord)
+{
+ // if we aren't set up to do interop debugging this function should just bail out
+ if(m_pRCThread == NULL)
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ DebuggerIPCControlBlock *pDCB = m_pRCThread->GetDCB();
+ if(pDCB == NULL)
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ if (!pDCB->m_rightSideIsWin32Debugger)
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ // at this point we know that there is an interop debugger attached. This makes it safe to send
+ // flares
+#if DOSPEW
+ DWORD tid = GetCurrentThreadId();
+#endif
+
+ SPEW(fprintf(stderr, "0x%x D::FCHF: in first chance hijack filter.\n", tid));
+ SPEW(fprintf(stderr, "0x%x D::FCHF: pExceptionRecord=0x%p (%d), pContext=0x%p (%d)\n", tid, pExceptionRecord, sizeof(EXCEPTION_RECORD),
+ pContext, sizeof(CONTEXT)));
+#if defined(_TARGET_AMD64_)
+ SPEW(fprintf(stderr, "0x%x D::FCHF: code=0x%08x, addr=0x%p, Rip=0x%p, Rsp=0x%p, EFlags=0x%08x\n",
+ tid, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, pContext->Rip, pContext->Rsp,
+ pContext->EFlags));
+#elif defined(_TARGET_X86_)
+ SPEW(fprintf(stderr, "0x%x D::FCHF: code=0x%08x, addr=0x%08x, Eip=0x%08x, Esp=0x%08x, EFlags=0x%08x\n",
+ tid, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, pContext->Eip, pContext->Esp,
+ pContext->EFlags));
+
+#endif
+
+
+ // This memory is used as IPC during the hijack. We will place a pointer to this in
+ // either the EEThreadPtr or the EEDebuggerWord and then the RS can write info into
+ // the memory
+ DebuggerIPCFirstChanceData fcd;
+ // accessing through the volatile pointer to fend off some potential compiler optimizations.
+ // If the debugger changes that data from OOP we need to see those updates
+ volatile DebuggerIPCFirstChanceData* pFcd = &fcd;
+
+
+ {
+ // Hijack filters are always in the can't stop range.
+ // The RS knows this b/c it knows which threads it hijacked.
+ // Bump up the CS counter so that any further calls in the LS can see this too.
+ // (This makes places where we assert that we're in a CS region happy).
+ CantStopHolder hCantStop;
+
+ // Get the current runtime thread. This is only an optimized TLS access.
+ Thread *pEEThread = g_pEEInterface->GetThread();
+
+        // Is that really a ptr to a Thread? If the low bit is set or it is NULL then we don't have an EE Thread. If we
+        // have an EE Thread, then we know the original handler now. If not, we have to wait for the Right Side to fix up our
+ // handler chain once we've notified it that the exception does not belong to the runtime. Note: if we don't have an
+ // EE thread, then the exception never belongs to the Runtime.
+ bool hasEEThread = false;
+ if ((pEEThread != NULL) && !(((UINT_PTR)pEEThread) & 0x01))
+ {
+ SPEW(fprintf(stderr, "0x%x D::FCHF: Has EE thread.\n", tid));
+ hasEEThread = true;
+ }
+
+ // Hook up the memory so RS can get to it
+ fcd.pLeftSideContext.Set((DT_CONTEXT*)pContext);
+ fcd.action = HIJACK_ACTION_EXIT_UNHANDLED;
+ fcd.debugCounter = 0;
+ if(hasEEThread)
+ {
+ SPEW(fprintf(stderr, "0x%x D::FCHF: Set Debugger word to 0x%p.\n", tid, pFcd));
+ g_pEEInterface->SetThreadDebuggerWord(pEEThread, (VOID*) pFcd);
+ }
+ else
+ {
+ // this shouldn't be re-entrant
+ _ASSERTE(pEEThread == NULL);
+
+ SPEW(fprintf(stderr, "0x%x D::FCHF: EEThreadPtr word to 0x%p.\n", tid, (BYTE*)pFcd + 1));
+ g_pEEInterface->SetEEThreadPtr((void*) ((BYTE*)pFcd + 1));
+ }
+
+ // Signal the RS to tell us what to do
+ SPEW(fprintf(stderr, "0x%x D::FCHF: Signaling hijack started.\n", tid));
+ SignalHijackStarted();
+ SPEW(fprintf(stderr, "0x%x D::FCHF: Signaling hijack started complete. DebugCounter=0x%x\n", tid, pFcd->debugCounter));
+
+ if(pFcd->action == HIJACK_ACTION_WAIT)
+ {
+ // This exception does NOT belong to the CLR.
+ // If we belong to the CLR, then we either:
+ // - were a M2U transition, in which case we should be in a different Hijack
+ // - were a CLR exception in CLR code, in which case we should have continued and let the inproc handlers get it.
+ SPEW(fprintf(stderr, "0x%x D::FCHF: exception does not belong to the Runtime, hasEEThread=%d, pContext=0x%p\n",
+ tid, hasEEThread, pContext));
+
+ if(hasEEThread)
+ {
+ _ASSERTE(!pEEThread->GetInteropDebuggingHijacked()); // hijack is not re-entrant.
+ pEEThread->SetInteropDebuggingHijacked(TRUE);
+
+ // Setting the FilterContext must be done in cooperative mode (since it's like pushing a Frame onto the Frame chain).
+ // Thus we have a violation. We don't really need the filter context specifically here, we're just using
+ // it for legacy purposes as a way to stash the context of the original exception (that this thread was hijacked from).
+                // @todo - use another way to store the context independent of the Filter context.
+ CONTRACT_VIOLATION(ModeViolation);
+ _ASSERTE(g_pEEInterface->GetThreadFilterContext(pEEThread) == NULL);
+ g_pEEInterface->SetThreadFilterContext(pEEThread, pContext);
+ }
+
+ // Wait for the continue. We may / may not have an EE Thread for this, (and we're definitely
+ // not doing fiber-mode debugging), so just use a raw win32 API, and not some fancy fiber-safe call.
+ SPEW(fprintf(stderr, "0x%x D::FCHF: waiting for continue.\n", tid));
+
+ DWORD ret = WaitForSingleObject(g_pDebugger->m_pRCThread->GetDCB()->m_leftSideUnmanagedWaitEvent,
+ INFINITE);
+
+ SPEW(fprintf(stderr, "0x%x D::FCHF: waiting for continue complete.\n", tid));
+ if (ret != WAIT_OBJECT_0)
+ {
+ SPEW(fprintf(stderr, "0x%x D::FCHF: wait failed!\n", tid));
+ }
+
+ if(hasEEThread)
+ {
+ _ASSERTE(pEEThread->GetInteropDebuggingHijacked());
+ pEEThread->SetInteropDebuggingHijacked(FALSE);
+ _ASSERTE(!ISREDIRECTEDTHREAD(pEEThread));
+
+ // See violation above.
+ CONTRACT_VIOLATION(ModeViolation);
+ g_pEEInterface->SetThreadFilterContext(pEEThread, NULL);
+ _ASSERTE(g_pEEInterface->GetThreadFilterContext(pEEThread) == NULL);
+ }
+ }
+
+ SPEW(fprintf(stderr, "0x%x D::FCHF: signaling HijackComplete.\n", tid));
+ SignalHijackComplete();
+ SPEW(fprintf(stderr, "0x%x D::FCHF: done signaling HijackComplete. DebugCounter=0x%x\n", tid, pFcd->debugCounter));
+
+ // we should know what we are about to do now
+ _ASSERTE(pFcd->action != HIJACK_ACTION_WAIT);
+
+ // cleanup from above
+ if (hasEEThread)
+ {
+ SPEW(fprintf(stderr, "0x%x D::FCHF: set debugger word = NULL.\n", tid));
+ g_pEEInterface->SetThreadDebuggerWord(pEEThread, (VOID*) NULL);
+ }
+ else
+ {
+ SPEW(fprintf(stderr, "0x%x D::FCHF: set EEThreadPtr = NULL.\n", tid));
+ g_pEEInterface->SetEEThreadPtr(NULL);
+ }
+
+ } // end can't stop region
+
+ if(pFcd->action == HIJACK_ACTION_EXIT_HANDLED)
+ {
+ SPEW(fprintf(stderr, "0x%x D::FCHF: exiting with CONTINUE_EXECUTION\n", tid));
+ return EXCEPTION_CONTINUE_EXECUTION;
+ }
+ else
+ {
+ SPEW(fprintf(stderr, "0x%x D::FCHF: exiting with CONTINUE_SEARCH\n", tid));
+ _ASSERTE(pFcd->action == HIJACK_ACTION_EXIT_UNHANDLED);
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+}
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
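+// Body of the generic hijack (see Debugger::GenericHijackFunc below). It marks the thread
+// as interop-hijacked, waits on the left side's unmanaged continue event, then reads the
+// continue type out of the thread's debugger word: non-zero means the Right Side did not
+// clear a second-chance exception, in which case we terminate the process just as a
+// native-only debugger would.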
+void GenericHijackFuncHelper()
+{
+#if DOSPEW
+ DWORD tid = GetCurrentThreadId();
+#endif
+ // Hijack filters are always in the can't stop range.
+ // The RS knows this b/c it knows which threads it hijacked.
+ // Bump up the CS counter so that any further calls in the LS can see this too.
+ // (This makes places where we assert that we're in a CS region happy).
+ CantStopHolder hCantStop;
+
+ SPEW(fprintf(stderr, "0x%x D::GHF: in generic hijack.\n", tid));
+
+    // There is no need to set up any context pointer or interact with the Right Side in any way. We simply wait for
+ // the continue event to be set.
+ SPEW(fprintf(stderr, "0x%x D::GHF: waiting for continue.\n", tid));
+
+ // If this thread has an EE thread and that EE thread has preemptive gc disabled, then mark that there is a
+ // thread at an unsafe place and enable pgc. This will allow us to sync even with this thread hijacked.
+ bool disabled = false;
+
+ Thread *pEEThread = g_pEEInterface->GetThread();
+
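+    // Note: the value returned may have its low bit set, which indicates there is no real
+    // EE Thread for this thread (see the handling after the wait below).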
+ if ((pEEThread != NULL) && !(((UINT_PTR)pEEThread) & 0x01))
+ {
+ disabled = g_pEEInterface->IsPreemptiveGCDisabled();
+ _ASSERTE(!disabled);
+
+ _ASSERTE(!pEEThread->GetInteropDebuggingHijacked());
+ pEEThread->SetInteropDebuggingHijacked(TRUE);
+ }
+
+ DWORD ret = WaitForSingleObject(g_pRCThread->GetDCB()->m_leftSideUnmanagedWaitEvent,
+ INFINITE);
+
+ if (ret != WAIT_OBJECT_0)
+ {
+ SPEW(fprintf(stderr, "0x%x D::GHF: wait failed!\n", tid));
+ }
+
+ // Get the continue type. Non-zero means that the exception was not cleared by the Right Side and therefore has
+ // not been handled. Zero means that the exception has been cleared. (Presumably, the debugger altered the
+ // thread's context before clearing the exception, so continuing will give a different result.)
+ DWORD continueType = 0;
+
+ pEEThread = g_pEEInterface->GetThread();
+
+ if (((UINT_PTR)pEEThread) & 0x01)
+ {
+ // There is no EE Thread for this thread, so we null out the TLS word so we don't confuse the Runtime.
+ continueType = 1;
+ g_pEEInterface->SetEEThreadPtr(NULL);
+ pEEThread = NULL;
+ }
+ else if (pEEThread)
+ {
+ // We've got a Thread ptr, so get the continue type out of the thread's debugger word.
+ continueType = (DWORD) g_pEEInterface->GetThreadDebuggerWord(pEEThread);
+
+ _ASSERTE(pEEThread->GetInteropDebuggingHijacked());
+ pEEThread->SetInteropDebuggingHijacked(FALSE);
+ }
+
+ SPEW(fprintf(stderr, "0x%x D::GHF: continued with %d.\n", tid, continueType));
+
+ if (continueType)
+ {
+ SPEW(fprintf(stderr, "0x%x D::GHF: calling ExitProcess\n", tid));
+
+ // Continuing from a second chance exception without clearing the exception causes the process to
+ // exit. Note: the continue type will only be non-zero if this hijack was setup for a second chance
+ // exception. If the hijack was setup for another type of debug event, then we'll never get here.
+ //
+ // We explicitly terminate the process directly instead of going through any escalation policy because:
+ // 1) that's what a native-only debugger would do. Interop and Native-only should be the same.
+ // 2) there's no CLR escalation policy anyways for *native* unhandled exceptions.
+ // 3) The escalation policy may do lots of extra confusing work (like fire MDAs) that can only cause
+ // us grief.
+ TerminateProcess(GetCurrentProcess(), 0);
+ }
+
+ SPEW(fprintf(stderr, "0x%x D::GHF: signaling continue...\n", tid));
+}
+#endif
+
+
+//
+// This is the function that a thread is hijacked to by the Right Side during a variety of debug events. This function
+// must be naked.
+//
+#if defined(_TARGET_X86_)
+__declspec(naked)
+#endif // defined(_TARGET_X86_)
+void Debugger::GenericHijackFunc(void)
+{
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+#if defined(_TARGET_X86_)
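+    // This function is naked, so set up a standard EBP frame and reserve space for
+    // locals (__LOCAL_SIZE) by hand before calling into C++ code.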
+ _asm
+ {
+ push ebp
+ mov ebp,esp
+ sub esp,__LOCAL_SIZE
+ }
+#endif
+    // We can't have C++ classes w/ dtors in a declspec naked function, so just call into a helper.
+ GenericHijackFuncHelper();
+
+#if defined(_TARGET_X86_)
+ _asm
+ {
+ mov esp,ebp
+ pop ebp
+ }
+#endif
+
+ // This signals the Right Side that this thread is ready to have its context restored.
+ ExceptionNotForRuntime();
+
+#else
+ _ASSERTE(!"@todo - port GenericHijackFunc");
+#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+ _ASSERTE(!"Should never get here (Debugger::GenericHijackFunc)");
+}
+
+
+
+
+//#ifdef _TARGET_X86_
+//
+// This is the function that is called when we determine that a first chance exception hijack has
+// begun and memory is prepared for the RS to tell the LS what to do
+//
+void Debugger::SignalHijackStarted(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ SignalHijackStartedFlare();
+#else
+    _ASSERTE(!"@todo - port the flares to the platform you're running on.");
+#endif
+}
+
+//
+// This is the function that is called when we determine that a first chance exception really belongs to the Runtime,
+// and that that exception is due to a managed->unmanaged transition. This notifies the Right Side of this and the Right
+// Side fixes up the thread's execution state from there, making sure to remember that it needs to continue to hide the
+// hijack state of the thread.
+//
+void Debugger::ExceptionForRuntimeHandoffStart(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ ExceptionForRuntimeHandoffStartFlare();
+#else
+    _ASSERTE(!"@todo - port the flares to the platform you're running on.");
+#endif
+
+}
+
+//
+// This is the function that is called when the original handler returns after we've determined that an exception was
+// due to a managed->unmanaged transition. This notifies the Right Side of this and the Right Side fixes up the thread's
+// execution state from there, making sure to turn off its flag indicating that the thread's hijack state should still
+// be hidden.
+//
+void Debugger::ExceptionForRuntimeHandoffComplete(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ ExceptionForRuntimeHandoffCompleteFlare();
+#else
+    _ASSERTE(!"@todo - port the flares to the platform you're running on.");
+#endif
+
+}
+
+//
+// This signals the RS that a hijack function is ready to return. This will cause the RS to restore
+// the thread context
+//
+void Debugger::SignalHijackComplete(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ SignalHijackCompleteFlare();
+#else
+    _ASSERTE(!"@todo - port the flares to the platform you're running on.");
+#endif
+
+}
+
+//
+// This is the function that is called when we determine that a first chance exception does not belong to the
+// Runtime. This notifies the Right Side of this and the Right Side fixes up the thread's execution state from there.
+//
+void Debugger::ExceptionNotForRuntime(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ ExceptionNotForRuntimeFlare();
+#else
+    _ASSERTE(!"@todo - port the flares to the platform you're running on.");
+#endif
+}
+
+//
+// This is the function that is called when we want to send a sync complete event to the Right Side when it is the Win32
+// debugger of this process. This notifies the Right Side of this and the Right Side fixes up the thread's execution
+// state from there.
+//
+void Debugger::NotifyRightSideOfSyncComplete(void)
+{
+ WRAPPER_NO_CONTRACT;
+ STRESS_LOG0(LF_CORDB, LL_INFO100000, "D::NRSOSC: Sending flare...\n");
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+ NotifyRightSideOfSyncCompleteFlare();
+#else
+    _ASSERTE(!"@todo - port the flares to the platform you're running on.");
+#endif
+ STRESS_LOG0(LF_CORDB, LL_INFO100000, "D::NRSOSC: Flare sent\n");
+}
+
+#endif // FEATURE_INTEROP_DEBUGGING
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+bool Debugger::GetILOffsetFromNative (MethodDesc *pFunc, const BYTE *pbAddr,
+ DWORD nativeOffset, DWORD *ilOffset)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ }
+ CONTRACTL_END;
+
+ if (!HasLazyData())
+ {
+ DebuggerLockHolder dbgLockHolder(this);
+ // This is an entry path into the debugger, so make sure we're inited.
+ LazyInit();
+ }
+
+ // Sometimes we'll get called w/ an instantiating stub MD.
+ if (pFunc->IsWrapperStub())
+ {
+ pFunc = pFunc->GetWrappedMethodDesc();
+ }
+
+ DebuggerJitInfo *jitInfo =
+ GetJitInfo(pFunc, (const BYTE *)pbAddr);
+
+ if (jitInfo != NULL)
+ {
+ CorDebugMappingResult map;
+ DWORD whichIDontCare;
+
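+        // Map the native offset back to an IL offset via the JIT info. We don't care
+        // about the exact mapping result or which sequence-point entry was used here.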
+ *ilOffset = jitInfo->MapNativeOffsetToIL(
+ nativeOffset,
+ &map,
+ &whichIDontCare);
+
+ return true;
+ }
+
+ return false;
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+DWORD Debugger::GetHelperThreadID(void )
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pRCThread->GetDCB()
+ ->m_temporaryHelperThreadId;
+}
+
+
+// HRESULT Debugger::InsertToMethodInfoList(): Make sure
+// that there's only one head of the list of DebuggerMethodInfos
+// for the (implicitly) given MethodDef/Module pair.
+HRESULT
+Debugger::InsertToMethodInfoList( DebuggerMethodInfo *dmi )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO10000,"D:IAHOL DMI: dmi:0x%08x\n", dmi));
+
+ HRESULT hr = S_OK;
+
+ _ASSERTE(dmi != NULL);
+
+ _ASSERTE(HasDebuggerDataLock());
+
+ // CHECK_DJI_TABLE_DEBUGGER;
+
+ hr = CheckInitMethodInfoTable();
+
+ if (FAILED(hr)) {
+ return (hr);
+ }
+
+ DebuggerMethodInfo *dmiPrev = m_pMethodInfos->GetMethodInfo(dmi->m_module, dmi->m_token);
+
+ _ASSERTE((dmiPrev == NULL) || ((dmi->m_token == dmiPrev->m_token) && (dmi->m_module == dmiPrev->m_module)));
+
+ LOG((LF_CORDB,LL_INFO10000,"D:IAHOL: current head of dmi list:0x%08x\n",dmiPrev));
+
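+    // If a DebuggerMethodInfo already exists for this module/token, link the new one in
+    // as the new head of the list; otherwise just add it to the table.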
+ if (dmiPrev != NULL)
+ {
+ dmi->m_prevMethodInfo = dmiPrev;
+ dmiPrev->m_nextMethodInfo = dmi;
+
+ _ASSERTE(dmi->m_module != NULL);
+ hr = m_pMethodInfos->OverwriteMethodInfo(dmi->m_module,
+ dmi->m_token,
+ dmi,
+ FALSE);
+
+ LOG((LF_CORDB,LL_INFO10000,"D:IAHOL: DMI version 0x%04x for token 0x%08x\n",
+ dmi->GetCurrentEnCVersion(),dmi->m_token));
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "AddMethodInfo being called in D:IAHOL\n"));
+ hr = m_pMethodInfos->AddMethodInfo(dmi->m_module,
+ dmi->m_token,
+ dmi);
+ }
+#ifdef _DEBUG
+ dmiPrev = m_pMethodInfos->GetMethodInfo(dmi->m_module, dmi->m_token);
+ LOG((LF_CORDB,LL_INFO10000,"D:IAHOL: new head of dmi list:0x%08x\n",
+ dmiPrev));
+#endif //_DEBUG
+
+ // DebuggerDataLockHolder out of scope - release implied
+ return hr;
+}
+
+//-----------------------------------------------------------------------------
+// Helper to get an SString through the IPC buffer.
+// We do this by putting the SString data into a LS_RS_buffer object,
+// and then the RS reads it out as soon as it's queued.
+// It's very very important that the SString's buffer is around while we send the event.
+// So we pass the SString by reference in case there's an implicit conversion (because
+// we don't want to do the conversion on a temporary object and then lose that object).
+//-----------------------------------------------------------------------------
+void SetLSBufferFromSString(Ls_Rs_StringBuffer * pBuffer, SString & str)
+{
+ // Copy string contents (+1 for null terminator) into a LS_RS_Buffer.
+ // Then the RS can pull it out as a null-terminated string.
+ pBuffer->SetLsData(
+ (BYTE*) str.GetUnicode(),
+ (str.GetCount() +1)* sizeof(WCHAR)
+ );
+}
+
+//*************************************************************
+// structure that we use to marshal MDA Notification event data.
+//*************************************************************
+struct SendMDANotificationParams
+{
+ Thread * m_pThread; // may be NULL. Lets us send on behalf of other threads.
+
+    // Pass SStrings by ptr to guarantee that they're shared (in case we internally modify their storage).
+ SString * m_szName;
+ SString * m_szDescription;
+ SString * m_szXML;
+ CorDebugMDAFlags m_flags;
+
+ SendMDANotificationParams(
+ Thread * pThread, // may be NULL. Lets us send on behalf of other threads.
+ SString * szName,
+ SString * szDescription,
+ SString * szXML,
+ CorDebugMDAFlags flags
+ ) :
+ m_pThread(pThread),
+ m_szName(szName),
+ m_szDescription(szDescription),
+ m_szXML(szXML),
+ m_flags(flags)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+};
+
+//-----------------------------------------------------------------------------
+// Actually send the MDA event. (Could be on any thread)
+// Parameters:
+// params - data to initialize the IPC event.
+//-----------------------------------------------------------------------------
+void Debugger::SendRawMDANotification(
+ SendMDANotificationParams * params
+)
+{
+    // Send the MDA notification event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+
+ Thread * pThread = params->m_pThread;
+ AppDomain *pAppDomain = (pThread != NULL) ? pThread->GetDomain() : NULL;
+
+ InitIPCEvent(ipce,
+ DB_IPCE_MDA_NOTIFICATION,
+ pThread,
+ pAppDomain);
+
+ SetLSBufferFromSString(&ipce->MDANotification.szName, *(params->m_szName));
+ SetLSBufferFromSString(&ipce->MDANotification.szDescription, *(params->m_szDescription));
+ SetLSBufferFromSString(&ipce->MDANotification.szXml, *(params->m_szXML));
+ ipce->MDANotification.dwOSThreadId = GetCurrentThreadId();
+ ipce->MDANotification.flags = params->m_flags;
+
+ m_pRCThread->SendIPCEvent();
+}
+
+//-----------------------------------------------------------------------------
+// Send an MDA notification. This ultimately translates to an ICorDebugMDA object on the Right-Side.
+// Called by EE to send a MDA debug event. This will block on the debug event
+// until the RS continues us.
+// Debugger may or may not be attached. If bAttach is TRUE, then this
+// will trigger a jit-attach as well.
+// See MDA documentation for what szName, szDescription + szXML should look like.
+// The debugger just passes them through.
+//
+// Parameters:
+// pThread - thread for debug event. May be null.
+// szName - short name of MDA.
+// szDescription - full description of MDA.
+// szXML - xml string for MDA.
+// bAttach - do a JIT-attach
+//-----------------------------------------------------------------------------
+void Debugger::SendMDANotification(
+ Thread * pThread, // may be NULL. Lets us send on behalf of other threads.
+ SString * szName,
+ SString * szDescription,
+ SString * szXML,
+ CorDebugMDAFlags flags,
+ BOOL bAttach
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PREFIX_ASSUME(szName != NULL);
+ PREFIX_ASSUME(szDescription != NULL);
+ PREFIX_ASSUME(szXML != NULL);
+
+ // Note: we normally don't send events like this when there is an unrecoverable error. However,
+ // if a host attempts to setup fiber mode on a thread, then we'll set an unrecoverable error
+ // and use an MDA to 1) tell the user and 2) get the Right Side to notice the unrecoverable error.
+ // Therefore, we'll go ahead and send a MDA event if the unrecoverable error is
+ // CORDBG_E_CANNOT_DEBUG_FIBER_PROCESS.
+ DebuggerIPCControlBlock *pDCB = m_pRCThread->GetDCB();
+
+
+    // If the MDA is occurring very early in startup before the DCB is set up, then bail.
+ if (pDCB == NULL)
+ {
+ return;
+ }
+
+ if (CORDBUnrecoverableError(this) && (pDCB->m_errorHR != CORDBG_E_CANNOT_DEBUG_FIBER_PROCESS))
+ {
+ return;
+ }
+
+ // Validate flags. Make sure that folks don't start passing flags that we don't handle.
+ // If pThread != current thread, caller should either pass in MDA_FLAG_SLIP or guarantee
+ // that pThread is not slipping.
+ _ASSERTE((flags & ~(MDA_FLAG_SLIP)) == 0);
+
+ // Helper thread should not be triggering MDAs. The helper thread is executing code in a very constrained
+ // and controlled region and shouldn't be able to do anything dangerous.
+    // If we revise this in the future, we should probably just post the event to the RS with the MDA_FLAG_SLIP flag,
+ // and then not bother suspending the runtime. The RS will get it on its next event.
+ // The jit-attach logic below assumes we're not on the helper. (If we are on the helper, then a debugger should already
+ // be attached)
+ if (ThisIsHelperThreadWorker())
+ {
+ CONSISTENCY_CHECK_MSGF(false, ("MDA '%s' fired on *helper* thread.\r\nDesc:%s",
+ szName->GetUnicode(), szDescription->GetUnicode()
+ ));
+
+ // If for some reason we're wrong about the assert above, we'll just ignore the MDA (rather than potentially deadlock)
+ return;
+ }
+
+ // Public entry point into the debugger. May cause a jit-attach, so we may need to be lazily-init.
+ if (!HasLazyData())
+ {
+ DebuggerLockHolder dbgLockHolder(this);
+ // This is an entry path into the debugger, so make sure we're inited.
+ LazyInit();
+ }
+
+
+ // Cases:
+ // 1) Debugger already attached, send event normally (ignore severity)
+ // 2) No debugger attached, Non-severe probe - ignore.
+ // 3) No debugger attached, Severe-probe - do a jit-attach.
+ bool fTryJitAttach = bAttach == TRUE;
+
+ // Check case #2 - no debugger, and no jit-attach. Early opt out.
+ if (!CORDebuggerAttached() && !fTryJitAttach)
+ {
+ return;
+ }
+
+ if (pThread == NULL)
+ {
+ // If there's no thread object, then we're not blocking after the event,
+ // and thus this probe may slip.
+ flags = (CorDebugMDAFlags) (flags | MDA_FLAG_SLIP);
+ }
+
+ {
+ GCX_PREEMP_EEINTERFACE_TOGGLE_IFTHREAD();
+
+ // For "Severe" probes, we'll do a jit attach dialog
+ if (fTryJitAttach)
+ {
+ // May return:
+ // - S_OK if we do a jit-attach,
+ // - S_FALSE if a debugger is already attached.
+ // - Error in other cases..
+
+ JitAttach(pThread, NULL, TRUE, FALSE);
+ }
+
+ // Debugger may be attached now...
+ if (CORDebuggerAttached())
+ {
+ SendMDANotificationParams params(pThread, szName, szDescription, szXML, flags);
+
+            // Non-attach case. Send like a normal event.
+            // This includes the case where someone launched the debugger in the meantime;
+            // just send the event.
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ // Send Log message event to the Right Side
+ SendRawMDANotification(&params);
+
+ // Stop all Runtime threads
+            // Even if we don't have a managed thread object, this will catch us at the next good spot.
+ TrapAllRuntimeThreads();
+
+ // Let other Runtime threads handle their events.
+ SENDIPCEVENT_END;
+ }
+ } // end of GCX_PREEMP_EEINTERFACE_TOGGLE()
+}
+
+//*************************************************************
+// This method sends a log message over to the right side for the debugger to log it.
+//
+// The CLR doesn't assign any semantics to the level or category values.
+// The BCL has a level convention (LoggingLevels enum), but this isn't exposed publicly,
+// so we shouldn't base our behavior on it in any way.
+//*************************************************************
+void Debugger::SendLogMessage(int iLevel,
+ SString * pSwitchName,
+ SString * pMessage)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SLM: Sending log message.\n"));
+
+ // Send the message only if the debugger is attached to this appdomain.
+    // Note that the debugger may detach at any time, so we'll have to check
+ // this again after we get the lock.
+ AppDomain *pAppDomain = g_pEEInterface->GetThread()->GetDomain();
+
+ if (!CORDebuggerAttached())
+ {
+ return;
+ }
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ // Send Log message event to the Right Side
+ SendRawLogMessage(
+ pThread,
+ pAppDomain,
+ iLevel,
+ pSwitchName,
+ pMessage);
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+
+ // Let other Runtime threads handle their events.
+ SENDIPCEVENT_END;
+}
+
+
+//*************************************************************
+//
+// Helper function to just send LogMessage event. Can be called on either
+// helper thread or managed thread.
+//
+//*************************************************************
+void Debugger::SendRawLogMessage(
+ Thread *pThread,
+ AppDomain *pAppDomain,
+ int iLevel,
+ SString * pCategory,
+ SString * pMessage
+)
+{
+ DebuggerIPCEvent* ipce;
+
+
+    // We should be holding the debugger lock.
+    // This can happen on either the native helper thread or a managed thread.
+ _ASSERTE(ThreadHoldsLock());
+
+    // It's possible that the debugger detached while we were waiting
+ // for our lock. Check again and abort the event if it did.
+ if (!CORDebuggerAttached())
+ {
+ return;
+ }
+
+ ipce = m_pRCThread->GetIPCEventSendBuffer();
+
+ // Send a LogMessage event to the Right Side
+ InitIPCEvent(ipce,
+ DB_IPCE_FIRST_LOG_MESSAGE,
+ pThread,
+ pAppDomain);
+
+ ipce->FirstLogMessage.iLevel = iLevel;
+ ipce->FirstLogMessage.szCategory.SetString(pCategory->GetUnicode());
+ SetLSBufferFromSString(&ipce->FirstLogMessage.szContent, *pMessage);
+
+ m_pRCThread->SendIPCEvent();
+}
+
+
+// This function sends a message to the right side informing it about
+// the creation/modification of a LogSwitch
+void Debugger::SendLogSwitchSetting(int iLevel,
+ int iReason,
+ __in_z LPCWSTR pLogSwitchName,
+ __in_z LPCWSTR pParentSwitchName)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "D::SLSS: Sending log switch message switch=%S parent=%S.\n",
+ pLogSwitchName, pParentSwitchName));
+
+ // Send the message only if the debugger is attached to this appdomain.
+ if (!CORDebuggerAttached())
+ {
+ return;
+ }
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ if (CORDebuggerAttached())
+ {
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_LOGSWITCH_SET_MESSAGE,
+ pThread,
+ pThread->GetDomain());
+
+ ipce->LogSwitchSettingMessage.iLevel = iLevel;
+ ipce->LogSwitchSettingMessage.iReason = iReason;
+
+
+ ipce->LogSwitchSettingMessage.szSwitchName.SetString(pLogSwitchName);
+
+ if (pParentSwitchName == NULL)
+ {
+ pParentSwitchName = W("");
+ }
+
+ ipce->LogSwitchSettingMessage.szParentSwitchName.SetString(pParentSwitchName);
+
+ m_pRCThread->SendIPCEvent();
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::SLSS: Skipping SendIPCEvent because RS detached."));
+ }
+
+ SENDIPCEVENT_END;
+}
+
+// send a custom debugger notification to the RS
+// Arguments:
+// input: pThread - thread on which the notification occurred
+// pDomain - domain file for the domain in which the notification occurred
+// classToken - metadata token for the type of the notification object
+void Debugger::SendCustomDebuggerNotification(Thread * pThread,
+ DomainFile * pDomain,
+ mdTypeDef classToken)
+{
+ CONTRACTL
+ {
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+    LOG((LF_CORDB, LL_INFO10000, "D::SCDN: Sending custom debugger notification.\n"));
+
+ // Send the message only if the debugger is attached to this appdomain.
+    // Note that the debugger may detach at any time, so we'll have to check
+ // this again after we get the lock.
+ if (!CORDebuggerAttached())
+ {
+ return;
+ }
+
+ Thread *curThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, curThread);
+
+ if (CORDebuggerAttached())
+ {
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_CUSTOM_NOTIFICATION,
+ curThread,
+ curThread->GetDomain());
+
+ VMPTR_DomainFile vmDomainFile = VMPTR_DomainFile::MakePtr(pDomain);
+
+ ipce->CustomNotification.classToken = classToken;
+ ipce->CustomNotification.vmDomainFile = vmDomainFile;
+
+
+ m_pRCThread->SendIPCEvent();
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::SCDN: Skipping SendIPCEvent because RS detached."));
+ }
+
+ SENDIPCEVENT_END;
+}
+
+
+//-----------------------------------------------------------------------------
+//
+// Add the AppDomain to the list stored in the IPC block. It adds the id and
+// the name.
+//
+// Arguments:
+// pAppDomain - The runtime app domain object to add.
+//
+// Return Value:
+// S_OK on success, else detailed error code.
+//
+HRESULT Debugger::AddAppDomainToIPC(AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ LPCWSTR szName = NULL;
+
+ LOG((LF_CORDB, LL_INFO100, "D::AADTIPC: Executing AADTIPC for AppDomain 0x%08x (0x%x).\n",
+ pAppDomain,
+ pAppDomain->GetId().m_dwId));
+
+ STRESS_LOG2(LF_CORDB, LL_INFO10000, "D::AADTIPC: AddAppDomainToIPC:%#08x, %#08x\n",
+ pAppDomain, pAppDomain->GetId().m_dwId);
+
+
+
+ _ASSERTE(m_pAppDomainCB->m_iTotalSlots > 0);
+ _ASSERTE(m_pAppDomainCB->m_rgListOfAppDomains != NULL);
+
+ {
+ //
+ // We need to synchronize this routine with the attach logic. The "normal"
+ // attach case uses the HelperThread and TrapAllRuntimeThreads to synchronize
+ // the runtime before sending any of the events (including AppDomainCreates)
+ // to the right-side. Thus, we can synchronize with this case by forcing us
+ // to go co-operative. If we were already co-op, then the helper thread will
+ // wait to start the attach until all co-op threads are paused. If we were
+ // pre-emptive, then going co-op will suspend us until the HelperThread finishes.
+ //
+ // The second case is under the IPC event for ATTACHING, which is where there are
+ // zero app domains, so it is considered an 'early attach' case. To synchronize
+ // with this we have to grab and hold the AppDomainDB lock.
+ //
+
+ GCX_COOP();
+
+ // Lock the list
+ if (!m_pAppDomainCB->Lock())
+ {
+ return E_FAIL;
+ }
+
+ // Get a free entry from the list
+ AppDomainInfo *pAppDomainInfo = m_pAppDomainCB->GetFreeEntry();
+
+ // Function returns NULL if the list is full and a realloc failed.
+ if (!pAppDomainInfo)
+ {
+ hr = E_OUTOFMEMORY;
+ goto LErrExit;
+ }
+
+ // copy the ID
+ pAppDomainInfo->m_id = pAppDomain->GetId().m_dwId;
+
+ // Now set the AppDomainName.
+
+ /*
+ * TODO :
+ *
+ * Make sure that returning NULL here does not result in a catastrophic
+ * failure.
+ *
+ * GetFriendlyNameNoThrow may call SetFriendlyName, which may call
+ * UpdateAppDomainEntryInIPC. There is no recursive death, however, because
+ * the AppDomainInfo object does not contain a pointer to the app domain
+ * yet.
+ */
+ szName = pAppDomain->GetFriendlyNameForDebugger();
+ pAppDomainInfo->SetName(szName);
+
+ // Save on to the appdomain pointer
+ pAppDomainInfo->m_pAppDomain = pAppDomain;
+
+ // bump the used slot count
+ m_pAppDomainCB->m_iNumOfUsedSlots++;
+
+LErrExit:
+ // UnLock the list
+ m_pAppDomainCB->Unlock();
+
+ // Send event to debugger if one is attached.
+ if (CORDebuggerAttached())
+ {
+ SendCreateAppDomainEvent(pAppDomain);
+ }
+ }
+
+ return hr;
+}
+
+
+/******************************************************************************
+ * Remove the AppDomain from the list stored in the IPC block and send an ExitAppDomain
+ * event to the debugger if attached.
+ ******************************************************************************/
+HRESULT Debugger::RemoveAppDomainFromIPC (AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = E_FAIL;
+
+ LOG((LF_CORDB, LL_INFO100, "D::RADFIPC: Executing RADFIPC for AppDomain 0x%08x (0x%x).\n",
+ pAppDomain,
+ pAppDomain->GetId().m_dwId));
+
+ // if none of the slots are occupied, then simply return.
+ if (m_pAppDomainCB->m_iNumOfUsedSlots == 0)
+ return hr;
+
+ // Lock the list
+ if (!m_pAppDomainCB->Lock())
+ return (E_FAIL);
+
+
+ // Look for the entry
+ AppDomainInfo *pADInfo = m_pAppDomainCB->FindEntry(pAppDomain);
+
+ // Shouldn't be trying to remove an appdomain that was never added
+ if (!pADInfo)
+ {
+ // We'd like to assert this, but there is a small window where we may have
+ // called AppDomain::Init (and so it's fair game to call Stop, and hence come here),
+ // but not yet published the app domain.
+ // _ASSERTE(!"D::RADFIPC: trying to remove an AppDomain that was never added");
+ hr = (E_FAIL);
+ goto ErrExit;
+ }
+
+ // Release the entry
+ m_pAppDomainCB->FreeEntry(pADInfo);
+
+ErrExit:
+ // UnLock the list
+ m_pAppDomainCB->Unlock();
+
+ // send event to debugger if one is attached
+ if (CORDebuggerAttached())
+ {
+ SendExitAppDomainEvent(pAppDomain);
+ }
+
+ return hr;
+}
+
+/******************************************************************************
+ * Update the AppDomain in the list stored in the IPC block.
+ ******************************************************************************/
+HRESULT Debugger::UpdateAppDomainEntryInIPC(AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ if (GetThread()) { GC_TRIGGERS;} else {DISABLED(GC_NOTRIGGER);}
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ LPCWSTR szName = NULL;
+
+ LOG((LF_CORDB, LL_INFO100,
+ "D::UADEIIPC: Executing UpdateAppDomainEntryInIPC ad:0x%x.\n",
+ pAppDomain));
+
+ // if none of the slots are occupied, then simply return.
+ if (m_pAppDomainCB->m_iNumOfUsedSlots == 0)
+ return (E_FAIL);
+
+ // Lock the list
+ if (!m_pAppDomainCB->Lock())
+ return (E_FAIL);
+
+ // Look up the info entry
+ AppDomainInfo *pADInfo = m_pAppDomainCB->FindEntry(pAppDomain);
+
+ if (!pADInfo)
+ {
+ hr = E_FAIL;
+ goto ErrExit;
+ }
+
+ // Update the name only if new name is non-null
+ szName = pADInfo->m_pAppDomain->GetFriendlyNameForDebugger();
+ pADInfo->SetName(szName);
+
+ LOG((LF_CORDB, LL_INFO100,
+ "D::UADEIIPC: New name:%ls (AD:0x%x)\n", pADInfo->m_szAppDomainName,
+ pAppDomain));
+
+ErrExit:
+ // UnLock the list
+ m_pAppDomainCB->Unlock();
+
+ return hr;
+}
+
+HRESULT Debugger::CopyModulePdb(Module* pRuntimeModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(ThisIsHelperThread());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!pRuntimeModule->IsVisibleToDebugger())
+ {
+ return S_OK;
+ }
+
+ HRESULT hr = S_OK;
+#ifdef FEATURE_FUSION
+ //
+ // Populate the pdb to fusion cache.
+ //
+ if (pRuntimeModule->IsIStream() == FALSE)
+ {
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ EX_TRY
+ {
+ pRuntimeModule->FusionCopyPDBs(pRuntimeModule->GetPath());
+ }
+ EX_CATCH_HRESULT(hr); // ignore failures
+ }
+#endif // FEATURE_FUSION
+
+ return hr;
+}
+
+/******************************************************************************
+ * When attaching to a process, this is called to enumerate all of the
+ * AppDomains currently in the process and allow module pdbs to be copied over to the shadow dir, maintaining our V2 in-proc behaviour.
+ ******************************************************************************/
+HRESULT Debugger::IterateAppDomainsForPdbs()
+{
+ CONTRACTL
+ {
+ THROWS;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(ThisIsHelperThread());
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG0(LF_CORDB, LL_INFO100, "Entered function IterateAppDomainsForPdbs()\n");
+ HRESULT hr = S_OK;
+
+ // Lock the list
+ if (!m_pAppDomainCB->Lock())
+ return (E_FAIL);
+
+ // Iterate through the app domains
+ AppDomainInfo *pADInfo = m_pAppDomainCB->FindFirst();
+
+ while (pADInfo)
+ {
+ STRESS_LOG3(LF_CORDB, LL_INFO100, "Iterating over domain %#08x AD:%#08x %ls\n", pADInfo->m_pAppDomain->GetId().m_dwId, pADInfo->m_pAppDomain, pADInfo->m_szAppDomainName);
+
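+        // Walk every assembly in this app domain (including ones still loading) and copy
+        // pdbs for each debugger-visible module.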
+ AppDomain::AssemblyIterator i;
+ i = pADInfo->m_pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeLoading | kIncludeExecution));
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (i.Next(pDomainAssembly.This()))
+ {
+ if (!pDomainAssembly->IsVisibleToDebugger())
+ continue;
+
+ DomainAssembly::ModuleIterator j = pDomainAssembly->IterateModules(kModIterIncludeLoading);
+ while (j.Next())
+ {
+ DomainFile * pDomainFile = j.GetDomainFile();
+ if (!pDomainFile->ShouldNotifyDebugger())
+ continue;
+
+ Module* pRuntimeModule = pDomainFile->GetModule();
+ CopyModulePdb(pRuntimeModule);
+ }
+ if (pDomainAssembly->ShouldNotifyDebugger())
+ {
+ CopyModulePdb(pDomainAssembly->GetModule());
+ }
+ }
+
+ // Get the next appdomain in the list
+ pADInfo = m_pAppDomainCB->FindNext(pADInfo);
+ }
+
+ // Unlock the list
+ m_pAppDomainCB->Unlock();
+
+ STRESS_LOG0(LF_CORDB, LL_INFO100, "Exiting function IterateAppDomainsForPdbs\n");
+
+ return hr;
+}
+
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+HRESULT Debugger::InitAppDomainIPC(void)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+
+ PRECONDITION(CheckPointer(m_pAppDomainCB));
+ }
+ CONTRACTL_END;
+
+ // Ensure that if we throw here, the Terminate will get called and cleanup all resources.
+ // This will make Init an atomic operation - it either fully inits or fully fails.
+ class EnsureCleanup
+ {
+ Debugger * m_pThis;
+
+ public:
+ EnsureCleanup(Debugger * pThis)
+ {
+ m_pThis = pThis;
+ }
+
+ void SupressCleanup()
+ {
+ m_pThis = NULL;
+ }
+
+ ~EnsureCleanup()
+ {
+ if (m_pThis != NULL)
+ {
+ m_pThis->TerminateAppDomainIPC();
+ }
+ }
+ } hEnsureCleanup(this);
+
+ DWORD dwStrLen = 0;
+ WCHAR szExeName[MAX_PATH];
+ int i;
+
+ // all fields in the object can be zero initialized.
+ // If we throw, before fully initializing this, then cleanup won't try to free
+ // uninited values.
+ ZeroMemory(m_pAppDomainCB, sizeof(*m_pAppDomainCB));
+
+    // Fix for issue: whidbey 143061
+    // We are creating the mutex as held; when we unlock, the EndThreadAffinity in
+    // the hosting case will be unbalanced.
+    // Ideally, this would be fixed by creating the mutex not-held and calling the Lock method.
+    // That way, when we clean up after OOM (as you can tell, we never release the mutex in
+    // some error cases), we could change it to a holder class.
+ //
+ Thread::BeginThreadAffinity();
+
+ // Create a mutex to allow the Left and Right Sides to properly
+ // synchronize. The Right Side will spin until m_hMutex is valid,
+ // then it will acquire it before accessing the data.
+ HandleHolder hMutex(WszCreateMutex(NULL, TRUE/*hold*/, NULL));
+ if (hMutex == NULL)
+ {
+ ThrowLastError();
+ }
+ if (!m_pAppDomainCB->m_hMutex.SetLocal(hMutex))
+ {
+ ThrowLastError();
+ }
+ hMutex.SuppressRelease();
+
+ m_pAppDomainCB->m_iSizeInBytes = INITIAL_APP_DOMAIN_INFO_LIST_SIZE *
+ sizeof (AppDomainInfo);
+
+ // Number of slots in AppDomainListElement array
+ m_pAppDomainCB->m_rgListOfAppDomains = new AppDomainInfo[INITIAL_APP_DOMAIN_INFO_LIST_SIZE];
+ _ASSERTE(m_pAppDomainCB->m_rgListOfAppDomains != NULL); // throws on oom
+
+
+ m_pAppDomainCB->m_iTotalSlots = INITIAL_APP_DOMAIN_INFO_LIST_SIZE;
+
+ // Initialize each AppDomainListElement
+ for (i = 0; i < INITIAL_APP_DOMAIN_INFO_LIST_SIZE; i++)
+ {
+ m_pAppDomainCB->m_rgListOfAppDomains[i].FreeEntry();
+ }
+
+ // also initialize the process name
+ dwStrLen = WszGetModuleFileName(NULL,
+ szExeName,
+ MAX_PATH);
+
+ // If we couldn't get the name, then use a nice default.
+ if (dwStrLen == 0)
+ {
+ wcscpy_s(szExeName, COUNTOF(szExeName), W("<NoProcessName>"));
+ dwStrLen = (DWORD)wcslen(szExeName);
+ }
+
+ // If we got the name, copy it into a buffer. dwStrLen is the
+ // count of characters in the name, not including the null
+ // terminator.
+ m_pAppDomainCB->m_szProcessName = new WCHAR[dwStrLen + 1];
+ _ASSERTE(m_pAppDomainCB->m_szProcessName != NULL); // throws on oom
+
+ wcscpy_s(m_pAppDomainCB->m_szProcessName, dwStrLen + 1, szExeName);
+
+ // Add 1 to the string length so the Right Side will copy out the
+ // null terminator, too.
+ m_pAppDomainCB->m_iProcessNameLengthInBytes = (dwStrLen + 1) * sizeof(WCHAR);
+
+ if (m_pAppDomainCB->m_hMutex != NULL)
+ {
+ m_pAppDomainCB->Unlock();
+ }
+
+ hEnsureCleanup.SupressCleanup();
+ return S_OK;
+}
+
+/******************************************************************************
+ * Uninitialize the AppDomain IPC block
+ * Returns:
+ * S_OK - if fully uninitialized
+ * E_FAIL - if we can't get ownership of the block, and thus no uninitialization
+ * work is done.
+ ******************************************************************************/
+HRESULT Debugger::TerminateAppDomainIPC(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+ // If we have no AppDomain block, then we can consider it's already terminated.
+ if (m_pAppDomainCB == NULL)
+ return S_OK;
+
+ HRESULT hr = S_OK;
+
+ // Lock the list
+ // If there's no mutex, then we're in a partially created state.
+ // This means InitAppDomainIPC failed halfway through. But we're still thread safe
+ // since other threads can't access us if we don't have the mutex.
+ if ((m_pAppDomainCB->m_hMutex != NULL) && !m_pAppDomainCB->Lock())
+ {
+ // The callers don't check our return value, we may want to know when we can't gracefully clean up
+ LOG((LF_CORDB, LL_INFO10, "Debugger::TerminateAppDomainIPC: Failed to get AppDomain IPC lock, not cleaning up.\n"));
+
+ // If the lock is valid, but we can't get it, then we can't really
+ // uninitialize since someone else is using the block.
+ return (E_FAIL);
+ }
+
+ // The shared IPC segment could still be around after the debugger
+ // object has been destroyed during process shutdown. So, reset
+ // the UsedSlots count to 0 so that any out of process clients
+    // enumerating the app domains in this process see 0 AppDomains.
+ m_pAppDomainCB->m_iNumOfUsedSlots = 0;
+ m_pAppDomainCB->m_iTotalSlots = 0;
+
+    // Now delete the memory allocated for the AppDomainInfo array
+ delete [] m_pAppDomainCB->m_rgListOfAppDomains;
+ m_pAppDomainCB->m_rgListOfAppDomains = NULL;
+
+ delete [] m_pAppDomainCB->m_szProcessName;
+ m_pAppDomainCB->m_szProcessName = NULL;
+ m_pAppDomainCB->m_iProcessNameLengthInBytes = 0;
+
+ // Set the mutex handle to NULL.
+ // If the Right Side acquires the mutex, it will verify
+ // that the handle is still not NULL. If it is, then it knows it
+ // really lost.
+ RemoteHANDLE m = m_pAppDomainCB->m_hMutex;
+ m_pAppDomainCB->m_hMutex.m_hLocal = NULL;
+
+    // And bring us back to a fully uninitialized state.
+ ZeroMemory(m_pAppDomainCB, sizeof(*m_pAppDomainCB));
+
+ // We're done. release and close the mutex. Note that this must be done
+ // after we clear it out above to ensure there is no race condition.
+ if( m != NULL )
+ {
+ VERIFY(ReleaseMutex(m));
+ m.Close();
+ }
+
+ return hr;
+}
+
+
+#ifndef DACCESS_COMPILE
+
+//
+// FuncEvalSetup sets up a function evaluation for the given method on the given thread.
+//
+HRESULT Debugger::FuncEvalSetup(DebuggerIPCE_FuncEvalInfo *pEvalInfo,
+ BYTE **argDataArea,
+ DebuggerEval **debuggerEvalKey)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ Thread *pThread = pEvalInfo->vmThreadToken.GetRawPtr();
+
+
+ //
+ // If TS_AbortRequested (which may have been set by a pending FuncEvalAbort),
+ // we will not be able to do a new func-eval
+ //
+ // <TODO>@TODO: Remember the current value of m_State, reset m_State as appropriate,
+ // do the new func-eval, and then set m_State to the original value</TODO>
+ if (pThread->m_State & Thread::TS_AbortRequested)
+ return CORDBG_E_FUNC_EVAL_BAD_START_POINT;
+
+ if (g_fProcessDetach)
+ return CORDBG_E_FUNC_EVAL_BAD_START_POINT;
+
+ // If there is no guard page on this thread, then we've taken a stack overflow exception and can't run managed
+ // code on this thread. Therefore, we can't do a func eval on this thread.
+ if (!pThread->DetermineIfGuardPagePresent())
+ {
+ return CORDBG_E_ILLEGAL_IN_STACK_OVERFLOW;
+ }
+
+ bool fInException = pEvalInfo->evalDuringException;
+
+ // The thread has to be at a GC safe place for now, just in case the func eval causes a collection. Processing an
+    // exception also counts as a "safe place." Eventually, we'd like to avoid this check and eval anyway, but
+    // that's a ways off...
+ if (!fInException && !g_pDebugger->IsThreadAtSafePlace(pThread))
+ return CORDBG_E_ILLEGAL_AT_GC_UNSAFE_POINT;
+
+ // For now, we assume that the target thread must be stopped in managed code due to a single step or a
+ // breakpoint. Being stopped while sending a first or second chance exception is also valid, and there may or may
+ // not be a filter context when we do a func eval from such places. This will loosen over time, eventually allowing
+ // threads that are stopped anywhere in managed code to perform func evals.
+ CONTEXT *filterContext = GetManagedStoppedCtx(pThread);
+
+ if (filterContext == NULL && !fInException)
+ {
+ return CORDBG_E_ILLEGAL_AT_GC_UNSAFE_POINT;
+ }
+
+    // Create a DebuggerEval to hold info about this eval while it's in progress. Constructor copies the thread's
+ // CONTEXT.
+ DebuggerEval *pDE = new (interopsafeEXEC, nothrow) DebuggerEval(filterContext, pEvalInfo, fInException);
+
+ if (pDE == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ else if (!pDE->Init())
+ {
+        // We failed to change the page protection of the m_breakpointInstruction field to PAGE_EXECUTE_READWRITE.
+ return E_FAIL;
+ }
+
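+    // Compute the size of the argument data area: space for the generic type-arg data plus
+    // space that depends on the kind of func-eval (argument records, string characters, or
+    // array dimension sizes).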
+ SIZE_T argDataAreaSize = 0;
+
+ argDataAreaSize += pEvalInfo->genericArgsNodeCount * sizeof(DebuggerIPCE_TypeArgData);
+
+ if ((pEvalInfo->funcEvalType == DB_IPCE_FET_NORMAL) ||
+ (pEvalInfo->funcEvalType == DB_IPCE_FET_NEW_OBJECT) ||
+ (pEvalInfo->funcEvalType == DB_IPCE_FET_NEW_OBJECT_NC))
+ argDataAreaSize += pEvalInfo->argCount * sizeof(DebuggerIPCE_FuncEvalArgData);
+ else if (pEvalInfo->funcEvalType == DB_IPCE_FET_NEW_STRING)
+ argDataAreaSize += pEvalInfo->stringSize;
+ else if (pEvalInfo->funcEvalType == DB_IPCE_FET_NEW_ARRAY)
+ argDataAreaSize += pEvalInfo->arrayRank * sizeof(SIZE_T);
+
+ if (argDataAreaSize > 0)
+ {
+ pDE->m_argData = new (interopsafe, nothrow) BYTE[argDataAreaSize];
+
+ if (pDE->m_argData == NULL)
+ {
+ DeleteInteropSafeExecutable(pDE);
+ return E_OUTOFMEMORY;
+ }
+
+ // Pass back the address of the argument data area so the right side can write to it for us.
+ *argDataArea = pDE->m_argData;
+ }
+
+ // Set the thread's IP (in the filter context) to our hijack function if we're stopped due to a breakpoint or single
+ // step.
+ if (!fInException)
+ {
+ _ASSERTE(filterContext != NULL);
+
+ ::SetIP(filterContext, (UINT_PTR)GetEEFuncEntryPoint(::FuncEvalHijack));
+
+ // Don't be fooled into thinking you can push things onto the thread's stack now. If the thread is stopped at a
+        // breakpoint or from a single step, then it's really suspended in the SEH filter. ESP in the thread's CONTEXT,
+ // therefore, points into the middle of the thread's current stack. So we pass things we need in the hijack in
+ // the thread's registers.
+
+ // Set the first argument to point to the DebuggerEval.
+#if defined(_TARGET_X86_)
+ filterContext->Eax = (DWORD)pDE;
+#elif defined(_TARGET_AMD64_)
+ filterContext->Rcx = (SIZE_T)pDE;
+#elif defined(_TARGET_ARM_)
+ filterContext->R0 = (DWORD)pDE;
+#else
+ PORTABILITY_ASSERT("Debugger::FuncEvalSetup is not implemented on this platform.");
+#endif
+
+ //
+ // To prevent GCs until the func-eval gets a chance to run, we increment the counter here.
+ // We only need to do this if we have changed the filter CONTEXT, since the stack will be unwalkable
+ // in this case.
+ //
+ g_pDebugger->IncThreadsAtUnsafePlaces();
+ }
+ else
+ {
+ HRESULT hr = CheckInitPendingFuncEvalTable();
+
+ if (FAILED(hr))
+ {
+ DeleteInteropSafeExecutable(pDE); // Note this runs the destructor for DebuggerEval, which releases its internal buffers
+ return (hr);
+ }
+ // If we're in an exception, then add a pending eval for this thread. This will cause us to perform the func
+ // eval when the user continues the process after the current exception event.
+ GetPendingEvals()->AddPendingEval(pDE->m_thread, pDE);
+ }
+
+
+    // Return that all went well. Tracing the stack at this point should not show that the func eval is set up, but it
+ // will show a wrong IP, so it shouldn't be done.
+ *debuggerEvalKey = pDE;
+
+ LOG((LF_CORDB, LL_INFO100000, "D:FES for pDE:%08x evalType:%d on thread %#x, id=0x%x\n",
+ pDE, pDE->m_evalType, pThread, GetThreadIdHelper(pThread)));
+
+ return S_OK;
+}
+
+//
+// FuncEvalSetupReAbort sets up a function evaluation specifically to rethrow a ThreadAbortException on the given
+// thread.
+//
+HRESULT Debugger::FuncEvalSetupReAbort(Thread *pThread, Thread::ThreadAbortRequester requester)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000,
+ "D::FESRA: performing reabort on thread %#x, id=0x%x\n",
+ pThread, GetThreadIdHelper(pThread)));
+
+ // The thread has to be at a GC safe place. It should be, since this is only done in response to a previous eval
+ // completing with a ThreadAbortException.
+ if (!g_pDebugger->IsThreadAtSafePlace(pThread))
+ return CORDBG_E_ILLEGAL_AT_GC_UNSAFE_POINT;
+
+ // Grab the filter context.
+ CONTEXT *filterContext = GetManagedStoppedCtx(pThread);
+
+ if (filterContext == NULL)
+ {
+ return CORDBG_E_ILLEGAL_AT_GC_UNSAFE_POINT;
+ }
+
+    // Create a DebuggerEval to hold info about this eval while it's in progress. Constructor copies the thread's
+ // CONTEXT.
+ DebuggerEval *pDE = new (interopsafeEXEC, nothrow) DebuggerEval(filterContext, pThread, requester);
+
+ if (pDE == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ else if (!pDE->Init())
+ {
+        // We failed to change the page protection of the m_breakpointInstruction field to PAGE_EXECUTE_READWRITE.
+ return E_FAIL;
+ }
+
+ // Set the thread's IP (in the filter context) to our hijack function.
+ _ASSERTE(filterContext != NULL);
+
+ ::SetIP(filterContext, (UINT_PTR)GetEEFuncEntryPoint(::FuncEvalHijack));
+
+#ifdef _TARGET_X86_ // reliance on filterContext->Eip & Eax
+ // Set EAX to point to the DebuggerEval.
+ filterContext->Eax = (DWORD)pDE;
+#elif defined(_TARGET_AMD64_)
+ // Set RCX to point to the DebuggerEval.
+ filterContext->Rcx = (SIZE_T)pDE;
+#elif defined(_TARGET_ARM_)
+ filterContext->R0 = (DWORD)pDE;
+#else
+ PORTABILITY_ASSERT("FuncEvalSetupReAbort (Debugger.cpp) is not implemented on this platform.");
+#endif
+
+ // Now clear the bit requesting a re-abort
+ pThread->ResetThreadStateNC(Thread::TSNC_DebuggerReAbort);
+
+ g_pDebugger->IncThreadsAtUnsafePlaces();
+
+    // Return that all went well. Tracing the stack at this point should not show that the func eval is set up, but it
+ // will show a wrong IP, so it shouldn't be done.
+
+ return S_OK;
+}
+
+//
+// FuncEvalAbort: Does a gentle abort of a func-eval already in progress.
+// Because this type of abort waits for the thread to get to a good state,
+// it may never return, or may time out.
+//
+
+//
+// Wait at most 0.5 seconds.
+//
+#define FUNC_EVAL_DEFAULT_TIMEOUT_VALUE 500
+
+HRESULT
+Debugger::FuncEvalAbort(
+ DebuggerEval *debuggerEvalKey
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DebuggerEval *pDE = (DebuggerEval*) debuggerEvalKey;
+ HRESULT hr = S_OK;
+ CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(&hr, GetCanary());
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+
+ if (pDE->m_aborting == DebuggerEval::FE_ABORT_NONE)
+ {
+ // Remember that we're aborting this func eval.
+ pDE->m_aborting = DebuggerEval::FE_ABORT_NORMAL;
+
+ LOG((LF_CORDB, LL_INFO1000,
+ "D::FEA: performing UserAbort on thread %#x, id=0x%x\n",
+ pDE->m_thread, GetThreadIdHelper(pDE->m_thread)));
+
+ if (!g_fProcessDetach && !pDE->m_completed)
+ {
+ //
+ // Perform a stop on the thread that the eval is running on.
+ // This will cause a ThreadAbortException to be thrown on the thread.
+ //
+ EX_TRY
+ {
+ hr = pDE->m_thread->UserAbort(Thread::TAR_FuncEval, EEPolicy::TA_Safe, (DWORD)FUNC_EVAL_DEFAULT_TIMEOUT_VALUE, Thread::UAC_Normal);
+ if (hr == HRESULT_FROM_WIN32(ERROR_TIMEOUT))
+ {
+ hr = S_OK;
+ }
+ }
+ EX_CATCH
+ {
+ _ASSERTE(!"Unknown exception from UserAbort(), not expected");
+ }
+ EX_END_CATCH(EX_RETHROW);
+
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "D::FEA: UserAbort complete.\n"));
+ }
+
+ return hr;
+}
+
+//
+// FuncEvalRudeAbort: Does a rude abort of a func-eval in progress. This
+// leaves the thread in an undetermined state.
+//
+HRESULT
+Debugger::FuncEvalRudeAbort(
+ DebuggerEval *debuggerEvalKey
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+ CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(&hr, GetCanary());
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+
+ DebuggerEval *pDE = debuggerEvalKey;
+
+
+ if (!(pDE->m_aborting & DebuggerEval::FE_ABORT_RUDE))
+ {
+ //
+ // Remember that we're aborting this func eval.
+ //
+ pDE->m_aborting = (DebuggerEval::FUNC_EVAL_ABORT_TYPE)(pDE->m_aborting | DebuggerEval::FE_ABORT_RUDE);
+
+ LOG((LF_CORDB, LL_INFO1000,
+ "D::FEA: performing RudeAbort on thread %#x, id=0x%x\n",
+ pDE->m_thread, Debugger::GetThreadIdHelper(pDE->m_thread)));
+
+ if (!g_fProcessDetach && !pDE->m_completed)
+ {
+ //
+ // Perform a stop on the thread that the eval is running on.
+ // This will cause a ThreadAbortException to be thrown on the thread.
+ //
+ EX_TRY
+ {
+ hr = pDE->m_thread->UserAbort(Thread::TAR_FuncEval, EEPolicy::TA_Rude, (DWORD)FUNC_EVAL_DEFAULT_TIMEOUT_VALUE, Thread::UAC_Normal);
+ if (hr == HRESULT_FROM_WIN32(ERROR_TIMEOUT))
+ {
+ hr = S_OK;
+ }
+ }
+ EX_CATCH
+ {
+ _ASSERTE(!"Unknown exception from UserAbort(), not expected");
+ EX_RETHROW;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "D::FEA: RudeAbort complete.\n"));
+ }
+
+ return hr;
+}
+
+//
+// FuncEvalCleanup cleans up after a function evaluation is released.
+//
+HRESULT Debugger::FuncEvalCleanup(DebuggerEval *debuggerEvalKey)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DebuggerEval *pDE = debuggerEvalKey;
+
+ _ASSERTE(pDE->m_completed);
+
+ LOG((LF_CORDB, LL_INFO1000, "D::FEC: pDE:%08x 0x%08x, id=0x%x\n",
+ pDE, pDE->m_thread, GetThreadIdHelper(pDE->m_thread)));
+
+ DeleteInteropSafeExecutable(pDE);
+
+ return S_OK;
+}
+
+#endif // ifndef DACCESS_COMPILE
+
+//
+// SetReference sets an object reference for the Right Side,
+// respecting the write barrier for references that are in the heap.
+//
+HRESULT Debugger::SetReference(void *objectRefAddress,
+ VMPTR_OBJECTHANDLE vmObjectHandle,
+ void *newReference)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ hr = ValidateObject((Object *)newReference);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+
+ // If the object ref isn't in a handle, then go ahead and use
+ // SetObjectReference.
+ if (vmObjectHandle.IsNull())
+ {
+ OBJECTREF *dst = (OBJECTREF*)objectRefAddress;
+ OBJECTREF src = *((OBJECTREF*)&newReference);
+
+ SetObjectReferenceUnchecked(dst, src);
+ }
+ else
+ {
+
+ // If the object reference to set is inside of a handle, then
+ // fixup the handle.
+ OBJECTHANDLE h = vmObjectHandle.GetRawPtr();
+ OBJECTREF src = *((OBJECTREF*)&newReference);
+ HndAssignHandle(h, src);
+ }
+
+ return S_OK;
+}
+
+//
+// SetValueClass sets a value class for the Right Side, respecting the write barrier for references that are embedded
+// within in the value class.
+//
+HRESULT Debugger::SetValueClass(void *oldData, void *newData, DebuggerIPCE_BasicTypeData * type)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ TypeHandle th;
+ hr = BasicTypeInfoToTypeHandle(type, &th);
+
+ if (FAILED(hr))
+ return CORDBG_E_CLASS_NOT_LOADED;
+
+ // Update the value class.
+ CopyValueClassUnchecked(oldData, newData, th.GetMethodTable());
+
+ // Free the buffer that is holding the new data. This is a buffer that was created in response to a GET_BUFFER
+ // message, so we release it with ReleaseRemoteBuffer.
+ ReleaseRemoteBuffer((BYTE*)newData, true);
+
+ return hr;
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+HRESULT Debugger::SetILInstrumentedCodeMap(MethodDesc *fd,
+ BOOL fStartJit,
+ ULONG32 cILMapEntries,
+ COR_IL_MAP rgILMapEntries[])
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS_FROM_GETJITINFO;
+ }
+ CONTRACTL_END;
+
+ if (!HasLazyData())
+ {
+ DebuggerLockHolder dbgLockHolder(this);
+ // This is an entry path into the debugger, so make sure we're inited.
+ LazyInit();
+ }
+
+ DebuggerMethodInfo * dmi = GetOrCreateMethodInfo(fd->GetModule(), fd->GetMemberDef());
+ if (dmi == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ dmi->SetInstrumentedILMap(rgILMapEntries, cILMapEntries);
+
+ return S_OK;
+}
+
+//
+// EarlyHelperThreadDeath handles the case where the helper
+// thread has been ripped out from underneath of us by
+// ExitProcess or TerminateProcess. These calls are bad, whacking
+// all threads except the caller in the process. This can happen, for
+// instance, when an app calls ExitProcess. All threads are whacked,
+// the main thread calls all DLL main's, and the EE starts shutting
+// down in its DLL main with the helper thread terminated.
+//
+void Debugger::EarlyHelperThreadDeath(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (m_pRCThread)
+ m_pRCThread->EarlyHelperThreadDeath();
+}
+
+//
+// This tells the debugger that shutdown of the in-proc debugging services has begun. We need to know this during
+// managed/unmanaged debugging so we can stop doing certain things to the process (like hijacking threads).
+//
+void Debugger::ShutdownBegun(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ }
+ CONTRACTL_END;
+
+
+ // Shouldn't be Debugger-stopped if we're shutting down.
+ // However, shutdown can occur in preemptive mode. Thus if the RS does an AsyncBreak late
+ // enough, then the LS will appear to be stopped but may still shutdown.
+ // Since the debuggee can exit asynchronously at any time (eg, suppose somebody forcefully
+ // kills it with taskman), this doesn't introduce a new case.
+ // That aside, it would be great to be able to assert this:
+ //_ASSERTE(!IsStopped());
+
+ if (m_pRCThread != NULL)
+ {
+ DebuggerIPCControlBlock *dcb = m_pRCThread->GetDCB();
+
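+        // Only note that shutdown has begun if the DCB exists and the Right Side is the
+        // Win32 debugger of this process.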
+ if ((dcb != NULL) && (dcb->m_rightSideIsWin32Debugger))
+ dcb->m_shutdownBegun = true;
+ }
+}
+
+/*
+ * LockDebuggerForShutdown
+ *
+ * This routine is used during shutdown to tell the in-process portion of the
+ * debugger to synchronize with any threads that are currently using the
+ * debugging facilities such that no more threads will run debugging services.
+ *
+ * This is accomplished by transitioning the debugger lock in to a state where
+ * it will block all threads, except for the finalizer, shutdown, and helper thread.
+ */
+void Debugger::LockDebuggerForShutdown(void)
+{
+#ifndef DACCESS_COMPILE
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DebuggerLockHolder dbgLockHolder(this);
+
+ // Shouldn't be Debugger-stopped if we're shutting down.
+ // However, shutdown can occur in preemptive mode. Thus if the RS does an AsyncBreak late
+ // enough, then the LS will appear to be stopped but may still shutdown.
+ // Since the debuggee can exit asynchronously at any time (eg, suppose somebody forcefully
+ // kills it with taskman), this doesn't introduce a new case.
+ // That aside, it would be great to be able to assert this:
+ //_ASSERTE(!IsStopped());
+
+ // After setting this flag, nonspecial threads will not be able to
+ // take the debugger lock.
+ m_fShutdownMode = true;
+
+ m_ignoreThreadDetach = TRUE;
+#else
+ DacNotImpl();
+#endif
+}
+
+
+/*
+ * DisableDebugger
+ *
+ * This routine is used by the EE to inform the debugger that it should block all
+ * threads from executing as soon as it can. Any thread entering the debugger can
+ * block infinitely, as well.
+ *
+ * This is accomplished by transitioning the debugger lock into a mode where it will
+ * block all threads infinitely rather than taking the lock.
+ *
+ */
+void Debugger::DisableDebugger(void)
+{
+#ifndef DACCESS_COMPILE
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_INTOLERANT;
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ m_fDisabled = true;
+
+ CORDBDebuggerSetUnrecoverableError(this, CORDBG_E_DEBUGGING_DISABLED, false);
+
+#else
+ DacNotImpl();
+#endif
+}
+
+
+/****************************************************************************
+ * This will perform the duties of the helper thread if none already exists.
+ * This is called in the case that the loader lock is held and so no new
+ * threads can be spun up to be the helper thread, so the existing thread
+ * must be the helper thread until a new one can spin up.
+ * This is also called in the shutdown case (g_fProcessDetach==true) and our
+ * helper may have already been blown away.
+ ***************************************************************************/
+void Debugger::DoHelperThreadDuty()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ // This should not be a real helper thread.
+ _ASSERTE(!IsDbgHelperSpecialThread());
+ _ASSERTE(ThreadHoldsLock());
+
+ // We may be here in the shutdown case (only if the shutdown started after we got here).
+ // We'll get killed randomly anyways, so not much we can do.
+
+ // These assumptions are based off us being called from TART.
+ _ASSERTE(ThreadStore::HoldingThreadStore() || g_fProcessDetach); // got this from TART
+ _ASSERTE(m_trappingRuntimeThreads); // We're only called from TART.
+ _ASSERTE(!m_stopped); // we haven't sent the sync-complete yet.
+
+ // Can't have 2 threads doing helper duty.
+ _ASSERTE(m_pRCThread->GetDCB()->m_temporaryHelperThreadId == 0);
+
+ LOG((LF_CORDB, LL_INFO1000,
+ "D::SSCIPCE: helper thread is not ready, doing helper "
+ "thread duty...\n"));
+
+ // We're the temporary helper thread now.
+ DWORD dwMyTID = GetCurrentThreadId();
+ m_pRCThread->GetDCB()->m_temporaryHelperThreadId = dwMyTID;
+
+ // Make sure the helper thread has something to wait on while
+ // we're trying to be the helper thread.
+ VERIFY(ResetEvent(m_pRCThread->GetHelperThreadCanGoEvent()));
+
+ // We have not sent the sync-complete flare yet.
+
+ // Now that we've synchronized, we'll eventually send the sync-complete. But we're currently within the
+    // scope of somebody already sending an event. So unlock from that event so that we can send the sync-complete.
+ // Don't release the debugger lock
+ //
+ UnlockFromEventSending(NULL);
+
+    // We are the temporary helper thread. We won't deal with everything; we just
+    // pump for the Continue.
+ //
+ m_pRCThread->TemporaryHelperThreadMainLoop();
+
+ // We do not need to relock it since we never release it.
+ LockForEventSending(NULL);
+ _ASSERTE(ThreadHoldsLock());
+
+
+ STRESS_LOG1(LF_CORDB, LL_INFO1000,
+ "D::SSCIPCE: done doing helper thread duty. "
+ "Current helper thread id=0x%x\n",
+ m_pRCThread->GetDCB()->m_helperThreadId);
+
+ // We're not the temporary helper thread anymore.
+ _ASSERTE(m_pRCThread->GetDCB()->m_temporaryHelperThreadId == dwMyTID);
+ m_pRCThread->GetDCB()->m_temporaryHelperThreadId = 0;
+
+    // Let the helper thread go if it's waiting on us.
+ VERIFY(SetEvent(m_pRCThread->GetHelperThreadCanGoEvent()));
+}
+
+
+
+// This function is called from the EE to notify the right side
+// whenever the name of a thread or AppDomain changes
+//
+// Notes:
+// This just sends a ping event to notify that the name has been changed.
+// It does not send the actual updated name. Instead, the debugger can query for the name.
+//
+// For an AppDomain name change:
+//    - pAppDomain != NULL
+// - name retrieved via ICorDebugAppDomain::GetName
+//
+// For a Thread name change:
+// - pAppDomain == NULL, pThread != NULL
+// - name retrieved via a func-eval of Thread::get_Name
+HRESULT Debugger::NameChangeEvent(AppDomain *pAppDomain, Thread *pThread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+    // Don't try to send one of these if the thread really isn't set up
+    // yet. This can happen when initially setting up an app domain,
+    // before the appdomain create event has been sent. Since the app
+    // domain create event hasn't been sent yet in this case, it's okay
+    // to do this...
+ if (g_pEEInterface->GetThread() == NULL)
+ return S_OK;
+
+ // Skip if thread doesn't yet have native ID.
+ // This can easily happen if an app sets Thread.Name before it calls Thread.Start.
+ // Since this is just a ping-event, it's ignorable. The debugger can query the thread name at Thread.Start in this case.
+ // This emulates whidbey semantics.
+ if (pThread != NULL)
+ {
+ if (pThread->GetOSThreadId() == 0)
+ {
+ return S_OK;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "D::NCE: Sending NameChangeEvent 0x%x 0x%x\n",
+ pAppDomain, pThread));
+
+ Thread *curThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, curThread);
+
+ if (CORDebuggerAttached())
+ {
+
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_NAME_CHANGE,
+ curThread,
+ curThread->GetDomain());
+
+
+ if (pAppDomain)
+ {
+ ipce->NameChange.eventType = APP_DOMAIN_NAME_CHANGE;
+ ipce->NameChange.vmAppDomain.SetRawPtr(pAppDomain);
+ }
+ else
+ {
+ // Thread Name
+ ipce->NameChange.eventType = THREAD_NAME_CHANGE;
+ _ASSERTE (pThread);
+ ipce->NameChange.vmThread.SetRawPtr(pThread);
+ }
+
+ m_pRCThread->SendIPCEvent();
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::NCE: Skipping SendIPCEvent because RS detached."));
+ }
+
+ SENDIPCEVENT_END;
+
+ return S_OK;
+
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Send an event to the RS indicating that there's a Ctrl-C or Ctrl-Break.
+//
+// Arguments:
+// dwCtrlType - represents the type of the event (Ctrl-C or Ctrl-Break)
+//
+// Return Value:
+// Return TRUE if the event has been handled by the debugger.
+//
+
+BOOL Debugger::SendCtrlCToDebugger(DWORD dwCtrlType)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "D::SCCTD: Sending CtrlC Event 0x%x\n", dwCtrlType));
+
+ // Prevent other Runtime threads from handling events.
+ Thread *pThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ if (CORDebuggerAttached())
+ {
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce,
+ DB_IPCE_CONTROL_C_EVENT,
+ pThread,
+ NULL);
+
+ // The RS doesn't do anything with dwCtrlType
+ m_pRCThread->SendIPCEvent();
+
+ // Stop all Runtime threads
+ TrapAllRuntimeThreads();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::SCCTD: Skipping SendIPCEvent because RS detached."));
+ }
+
+ SENDIPCEVENT_END;
+
+ // now wait for notification from the right side about whether or not
+ // the out-of-proc debugger is handling ControlC events.
+ WaitForSingleObjectHelper(GetCtrlCMutex(), INFINITE);
+
+ return GetDebuggerHandlingCtrlC();
+}
+
+// Allows the debugger to keep an up-to-date list of special threads.
+HRESULT Debugger::UpdateSpecialThreadList(DWORD cThreadArrayLength,
+ DWORD *rgdwThreadIDArray)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(g_pRCThread != NULL);
+
+ DebuggerIPCControlBlock *pIPC = g_pRCThread->GetDCB();
+ _ASSERTE(pIPC);
+
+ if (!pIPC)
+ return (E_FAIL);
+
+ // Save the thread list information, and mark the dirty bit so
+ // the right side knows.
+ pIPC->m_specialThreadList = rgdwThreadIDArray;
+ pIPC->m_specialThreadListLength = cThreadArrayLength;
+ pIPC->m_specialThreadListDirty = true;
+
+ return (S_OK);
+}
+
+// Updates the pointer for the debugger services
+void Debugger::SetIDbgThreadControl(IDebuggerThreadControl *pIDbgThreadControl)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+ if (m_pIDbgThreadControl)
+ m_pIDbgThreadControl->Release();
+
+ m_pIDbgThreadControl = pIDbgThreadControl;
+
+ if (m_pIDbgThreadControl)
+ m_pIDbgThreadControl->AddRef();
+}
+
+//
+// If a thread is Win32 suspended right after hitting a breakpoint instruction, but before the OS has transitioned the
+// thread over to the user-level exception dispatching logic, then we may see the IP pointing after the breakpoint
+// instruction. There are times when the Runtime will use the IP to try to determine what code has run in the prolog or
+// epilog, most notably when unwinding a frame. If the thread is suspended in such a case, then the unwind will believe
+// that the instruction that the breakpoint replaced has really been executed, which is not true. This confuses the
+// unwinding logic. This function is called from Thread::HandledJITCase() to help us recognize when this may have
+// happened and allow us to skip the unwind and abort the HandledJITCase.
+//
+// The criteria are:
+//
+// 1) If a debugger is attached.
+//
+// 2) If the instruction before the IP is a breakpoint instruction.
+//
+// 3) If the IP is in the prolog or epilog of a managed function.
+//
+BOOL Debugger::IsThreadContextInvalid(Thread *pThread)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ BOOL invalid = FALSE;
+
+ // Get the thread context.
+ CONTEXT ctx;
+ ctx.ContextFlags = CONTEXT_CONTROL;
+ BOOL success = pThread->GetThreadContext(&ctx);
+
+ if (success)
+ {
+ // Check single-step flag
+ if (IsSSFlagEnabled(reinterpret_cast<DT_CONTEXT *>(&ctx) ARM_ARG(pThread)))
+ {
+ // Can't hijack a thread whose SS-flag is set. This could lead to races
+ // with the thread taking the SS-exception.
+ // The debugger's controller filters will poll for GC to avoid starvation.
+ STRESS_LOG0(LF_CORDB, LL_EVERYTHING, "HJC - Hardware trace flag applied\n");
+ return TRUE;
+ }
+ }
+
+ if (success)
+ {
+#ifdef _TARGET_X86_
+ // Grab Eip - 1
+ LPVOID address = (((BYTE*)GetIP(&ctx)) - 1);
+
+ EX_TRY
+ {
+ // Use AVInRuntimeImplOkHolder.
+ AVInRuntimeImplOkayHolder AVOkay;
+
+ // Is it a breakpoint?
+ if (AddressIsBreakpoint((CORDB_ADDRESS_TYPE*)address))
+ {
+ size_t prologSize; // Unused...
+ if (g_pEEInterface->IsInPrologOrEpilog((BYTE*)GetIP(&ctx), &prologSize))
+ {
+ LOG((LF_CORDB, LL_INFO1000, "D::ITCI: thread is after a BP and in prolog or epilog.\n"));
+ invalid = TRUE;
+ }
+ }
+ }
+ EX_CATCH
+ {
+            // If we fault trying to read the byte before EIP, then we know that it's not a breakpoint.
+ // Do nothing. The default return value is FALSE.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+#else // _TARGET_X86_
+        // On non-x86, trap frame reporting can detect whether the thread is suspended after an
+        // exception is hit but before the kernel has dispatched the exception to user mode.
+ // See Thread::IsContextSafeToRedirect().
+#endif // _TARGET_X86_
+ }
+ else
+ {
+        // If we can't get the context, then it's definitely invalid... ;)
+ LOG((LF_CORDB, LL_INFO1000, "D::ITCI: couldn't get thread's context!\n"));
+ invalid = TRUE;
+ }
+
+ return invalid;
+}
+
+
+// notification when a SQL connection begins
+void Debugger::CreateConnection(CONNID dwConnectionId, __in_z WCHAR *wzName)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO1000, "D::CreateConnection %d\n.", dwConnectionId));
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ if (CORDebuggerAttached())
+ {
+ DebuggerIPCEvent* ipce;
+
+        // Send a create-connection event to the Right Side.
+ ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce, DB_IPCE_CREATE_CONNECTION,
+ pThread,
+ NULL);
+ ipce->CreateConnection.connectionId = dwConnectionId;
+ _ASSERTE(wzName != NULL);
+ ipce->CreateConnection.wzConnectionName.SetString(wzName);
+
+ m_pRCThread->SendIPCEvent();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::CreateConnection: Skipping SendIPCEvent because RS detached."));
+ }
+
+ // Stop all Runtime threads if we actually sent an event
+ if (CORDebuggerAttached())
+ {
+ TrapAllRuntimeThreads();
+ }
+
+ SENDIPCEVENT_END;
+}
+
+// notification when a SQL connection ends
+void Debugger::DestroyConnection(CONNID dwConnectionId)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO1000, "D::DestroyConnection %d\n.", dwConnectionId));
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ Thread *thread = g_pEEInterface->GetThread();
+ // Note that the debugger lock is reentrant, so we may or may not hold it already.
+ SENDIPCEVENT_BEGIN(this, thread);
+
+    // Send a destroy-connection event to the Right Side.
+ DebuggerIPCEvent* ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce, DB_IPCE_DESTROY_CONNECTION,
+ thread,
+ NULL);
+ ipce->ConnectionChange.connectionId = dwConnectionId;
+
+ // IPC event is now initialized, so we can send it over.
+ SendSimpleIPCEventAndBlock();
+
+ // This will block on the continue
+ SENDIPCEVENT_END;
+
+}
+
+// notification for SQL connection changes
+void Debugger::ChangeConnection(CONNID dwConnectionId)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO1000, "D::ChangeConnection %d\n.", dwConnectionId));
+
+ if (CORDBUnrecoverableError(this))
+ return;
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ SENDIPCEVENT_BEGIN(this, pThread);
+
+ if (CORDebuggerAttached())
+ {
+ DebuggerIPCEvent* ipce;
+
+        // Send a change-connection event to the Right Side.
+ ipce = m_pRCThread->GetIPCEventSendBuffer();
+ InitIPCEvent(ipce, DB_IPCE_CHANGE_CONNECTION,
+ pThread,
+ NULL);
+ ipce->ConnectionChange.connectionId = dwConnectionId;
+ m_pRCThread->SendIPCEvent();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_INFO1000, "D::ChangeConnection: Skipping SendIPCEvent because RS detached."));
+ }
+
+ // Stop all Runtime threads if we actually sent an event
+ if (CORDebuggerAttached())
+ {
+ TrapAllRuntimeThreads();
+ }
+
+ SENDIPCEVENT_END;
+}
+
+
+//
+// Are we the helper thread?
+// Some important things about running on the helper thread:
+// - there's only 1, so guaranteed to be thread-safe.
+// - we'll never run managed code.
+// - therefore, Never GC.
+// - It listens for events from the RS.
+// - It's the only thread to send a sync complete.
+//
+bool ThisIsHelperThreadWorker(void)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+    // This can be NULL (e.g., the helper thread has no EE Thread object).
+ Thread * pThread;
+ pThread = GetThreadNULLOk();
+
+ // First check for a real helper thread. This will do a FLS access.
+ bool fIsHelperThread = !!IsDbgHelperSpecialThread();
+ if (fIsHelperThread)
+ {
+ // If we're on the real helper thread, we never run managed code
+ // and so we'd better not have an EE thread object.
+        _ASSERTE((pThread == NULL) || !"The helper thread should not be running managed code.\n"
+                 "Are you running managed code inside the dllmain? If so, your scenario is invalid and this "
+ "assert is only the tip of the iceberg.\n");
+ return true;
+ }
+
+ // Even if we're not on the real helper thread, we may still be on a thread
+ // pretending to be the helper. (Helper Duty, etc).
+ DWORD id = GetCurrentThreadId();
+
+ // Check for temporary helper thread.
+ if (ThisIsTempHelperThread(id))
+ {
+ return true;
+ }
+
+ return false;
+}
+
+//
+// Make call to the static method.
+// This is exposed to the contracts subsystem so that the helper thread can call
+// things on MODE_COOPERATIVE.
+//
+bool Debugger::ThisIsHelperThread(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return ThisIsHelperThreadWorker();
+}
+
+// Check if we're the temporary helper thread. There are 2 forms of this: one that assumes the current
+// thread (and so has the overhead of an extra call to GetCurrentThreadId() if we already know the tid),
+// and one that takes the tid explicitly.
+bool ThisIsTempHelperThread()
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD id = GetCurrentThreadId();
+ return ThisIsTempHelperThread(id);
+}
+
+bool ThisIsTempHelperThread(DWORD tid)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // If helper thread class isn't created, then there's no helper thread.
+ // No one is doing helper thread duty either.
+ // It's also possible we're in a shutdown case and have already deleted the
+ // data for the helper thread.
+ if (g_pRCThread != NULL)
+ {
+ // May be the temporary helper thread...
+ DebuggerIPCControlBlock * pBlock = g_pRCThread->GetDCB();
+ if (pBlock != NULL)
+ {
+ DWORD idTemp = pBlock->m_temporaryHelperThreadId;
+
+ if (tid == idTemp)
+ {
+ return true;
+ }
+ }
+ }
+ return false;
+
+}
+
+
+// This function is called when the host calls ICLRSecurityAttributeManager::setDacl.
+// It will re-DACL our SSE, RSEA, and RSER events.
+HRESULT Debugger::ReDaclEvents(PSECURITY_DESCRIPTOR securityDescriptor)
+{
+ WRAPPER_NO_CONTRACT;
+
+ return m_pRCThread->ReDaclEvents(securityDescriptor);
+}
+
+/* static */
+void Debugger::AcquireDebuggerDataLock(Debugger *pDebugger)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!g_fProcessDetach)
+ {
+ pDebugger->GetDebuggerDataLock()->Enter();
+ }
+}
+
+/* static */
+void Debugger::ReleaseDebuggerDataLock(Debugger *pDebugger)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (!g_fProcessDetach)
+ {
+ pDebugger->GetDebuggerDataLock()->Leave();
+ }
+}
+
+
+#else // DACCESS_COMPILE
+
+// determine whether the LS holds the data lock. If it does, we will assume the locked data is in an
+// inconsistent state and will throw an exception. The DAC will execute this if we are executing code
+// that takes the lock.
+// Arguments: input: pDebugger - the LS debugger data structure
+/* static */
+void Debugger::AcquireDebuggerDataLock(Debugger *pDebugger)
+{
+ SUPPORTS_DAC;
+
+ if (pDebugger->GetDebuggerDataLock()->GetEnterCount() != 0)
+ {
+ ThrowHR(CORDBG_E_PROCESS_NOT_SYNCHRONIZED);
+ }
+}
+#endif // DACCESS_COMPILE
+
+/* ------------------------------------------------------------------------ *
+ * DebuggerHeap impl
+ * ------------------------------------------------------------------------ */
+
+DebuggerHeap::DebuggerHeap()
+{
+#ifdef USE_INTEROPSAFE_HEAP
+ m_hHeap = NULL;
+#endif
+}
+
+
+DebuggerHeap::~DebuggerHeap()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Destroy();
+}
+
+void DebuggerHeap::Destroy()
+{
+#ifdef USE_INTEROPSAFE_HEAP
+ if (IsInit())
+ {
+ ::HeapDestroy(m_hHeap);
+ m_hHeap = NULL;
+ }
+#endif
+}
+
+bool DebuggerHeap::IsInit()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef USE_INTEROPSAFE_HEAP
+ return m_hHeap != NULL;
+#else
+ return true;
+#endif
+}
+
+HRESULT DebuggerHeap::Init(BOOL fExecutable)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+    // A knob catches the case where we don't want to lazily init the debugger.
+ _ASSERTE(!g_DbgShouldntUseDebugger);
+
+#ifdef USE_INTEROPSAFE_HEAP
+ // If already inited, then we're done.
+ // We normally don't double-init. However, we may oom between when we allocate the heap and when we do other initialization.
+ // We don't worry about backout code to free the heap. Rather, we'll just leave it alive and nop if we try to allocate it again.
+ if (IsInit())
+ {
+ return S_OK;
+ }
+
+#ifndef HEAP_CREATE_ENABLE_EXECUTE
+#define HEAP_CREATE_ENABLE_EXECUTE 0x00040000 // winnt create heap with executable pages
+#endif
+
+ // Create a standard, grow-able, thread-safe heap.
+ DWORD dwFlags = ((fExecutable == TRUE)? HEAP_CREATE_ENABLE_EXECUTE : 0);
+ m_hHeap = ::HeapCreate(dwFlags, 0, 0);
+ if (m_hHeap == NULL)
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+#endif
+ return S_OK;
+}
+
+// Only use canaries on x86 b/c they throw off alignment on IA64.
+#if defined(_DEBUG) && defined(_TARGET_X86_)
+#define USE_INTEROPSAFE_CANARY
+#endif
+
+#ifdef USE_INTEROPSAFE_CANARY
+// Small header to prefix interop-heap blocks.
+// This lets us enforce that we don't delete interopheap data from a non-interop heap.
+struct InteropHeapCanary
+{
+ ULONGLONG m_canary;
+
+ // Raw address - this is what the heap alloc + free routines use.
+ // User address - this is what the user sees after we adjust the raw address for the canary
+
+ // Given a raw address to an allocated block, get the canary + mark it.
+ static InteropHeapCanary * GetFromRawAddr(void * pStart)
+ {
+ _ASSERTE(pStart != NULL);
+ InteropHeapCanary * p = (InteropHeapCanary*) pStart;
+ p->Mark();
+ return p;
+ }
+
+ // Get the raw address from this canary.
+ void * GetRawAddr()
+ {
+ return (void*) this;
+ }
+
+ // Get a canary from a start address.
+ static InteropHeapCanary * GetFromUserAddr(void * pStart)
+ {
+ _ASSERTE(pStart != NULL);
+ InteropHeapCanary * p = ((InteropHeapCanary*) pStart)-1;
+ p->Check();
+ return p;
+ }
+ void * GetUserAddr()
+ {
+ this->Check();
+ return (void*) (this + 1);
+ }
+
+protected:
+ void Check()
+ {
+ CONSISTENCY_CHECK_MSGF((m_canary == kInteropHeapCookie),
+ ("Using InteropSafe delete on non-interopsafe allocated memory.\n"));
+ }
+ void Mark()
+ {
+ m_canary = kInteropHeapCookie;
+ }
+ static const ULONGLONG kInteropHeapCookie = 0x12345678;
+};
+#endif // USE_INTEROPSAFE_CANARY
+
+void *DebuggerHeap::Alloc(DWORD size)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef USE_INTEROPSAFE_CANARY
+ // Make sure we allocate enough space for the canary at the start.
+ size += sizeof(InteropHeapCanary);
+#endif
+
+ void *ret;
+#ifdef USE_INTEROPSAFE_HEAP
+ _ASSERTE(m_hHeap != NULL);
+ ret = ::HeapAlloc(m_hHeap, HEAP_ZERO_MEMORY, size);
+#else // USE_INTEROPSAFE_HEAP
+
+#ifndef FEATURE_PAL
+ HANDLE hExecutableHeap = ClrGetProcessExecutableHeap();
+#else // !FEATURE_PAL
+ HANDLE hExecutableHeap = ClrGetProcessHeap();
+#endif // !FEATURE_PAL
+
+ if (hExecutableHeap == NULL)
+ {
+ return NULL;
+ }
+ ret = ClrHeapAlloc(hExecutableHeap, NULL, S_SIZE_T(size));
+#endif // USE_INTEROPSAFE_HEAP
+
+#ifdef USE_INTEROPSAFE_CANARY
+ if (ret == NULL)
+ {
+ return NULL;
+ }
+ InteropHeapCanary * pCanary = InteropHeapCanary::GetFromRawAddr(ret);
+ ret = pCanary->GetUserAddr();
+#endif
+
+ return ret;
+}
+
+
+// Realloc memory.
+// If this fails, the original memory is still valid.
+void *DebuggerHeap::Realloc(void *pMem, DWORD newSize, DWORD oldSize)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pMem != NULL);
+ _ASSERTE(newSize != 0);
+ _ASSERTE(oldSize != 0);
+
+#if defined(USE_INTEROPSAFE_HEAP) && !defined(USE_INTEROPSAFE_CANARY)
+ // No canaries in this case.
+ // Call into realloc.
+ void *ret;
+
+ _ASSERTE(m_hHeap != NULL);
+ ret = ::HeapReAlloc(m_hHeap, HEAP_ZERO_MEMORY, pMem, newSize);
+#else
+    // Implement Realloc on top of Alloc & Free.
+ void *ret;
+
+ ret = this->Alloc(newSize);
+ if (ret == NULL)
+ {
+ // Not supposed to free original memory in failure condition.
+ return NULL;
+ }
+
+ memcpy(ret, pMem, oldSize);
+ this->Free(pMem);
+#endif
+
+ return ret;
+}
+
+void DebuggerHeap::Free(void *pMem)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef USE_INTEROPSAFE_CANARY
+ // Check for canary
+
+ if (pMem != NULL)
+ {
+ InteropHeapCanary * pCanary = InteropHeapCanary::GetFromUserAddr(pMem);
+ pMem = pCanary->GetRawAddr();
+ }
+#endif
+
+#ifdef USE_INTEROPSAFE_HEAP
+ if (pMem != NULL)
+ {
+ _ASSERTE(m_hHeap != NULL);
+ ::HeapFree(m_hHeap, 0, pMem);
+ }
+#else
+ if (pMem != NULL)
+ {
+ HANDLE hExecutableHeap = ClrGetProcessExecutableHeap();
+ _ASSERTE(hExecutableHeap != NULL);
+ ClrHeapFree(hExecutableHeap, NULL, pMem);
+ }
+#endif
+}
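+
+// Illustrative usage sketch (not part of this change; a hypothetical caller of the
+// interop-safe heap above). Realloc leaves the original block valid on failure, so
+// the caller must keep the old pointer until the call succeeds:
+//
+//   DebuggerHeap heap;
+//   if (SUCCEEDED(heap.Init(FALSE /* not executable */)))
+//   {
+//       void * p = heap.Alloc(64);
+//       if (p != NULL)
+//       {
+//           void * p2 = heap.Realloc(p, 128, 64);
+//           heap.Free((p2 != NULL) ? p2 : p);
+//       }
+//   }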
+
+#ifndef DACCESS_COMPILE
+
+
+// Undef this so that Debugger::MessageBox below can call the real EE version.
+#undef UtilMessageBoxVA
+
+// Message box API for the left side of the debugger. This API handles calls from the
+// debugger helper thread as well as from normal EE threads. It is the only one that
+// should be used from inside the debugger left side.
+int Debugger::MessageBox(
+ UINT uText, // Resource Identifier for Text message
+ UINT uCaption, // Resource Identifier for Caption
+ UINT uType, // Style of MessageBox
+ BOOL displayForNonInteractive, // Display even if the process is running non interactive
+ BOOL showFileNameInTitle, // Flag to show FileName in Caption
+ ...) // Additional Arguments
+{
+ CONTRACTL
+ {
+ MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
+ MODE_PREEMPTIVE;
+ NOTHROW;
+
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ va_list marker;
+ va_start(marker, showFileNameInTitle);
+
+ // Add the MB_TASKMODAL style to indicate that the dialog should be displayed on top of the windows
+ // owned by the current thread and should prevent interaction with them until dismissed.
+ uType |= MB_TASKMODAL;
+
+ int result = UtilMessageBoxVA(NULL, uText, uCaption, uType, displayForNonInteractive, showFileNameInTitle, marker);
+ va_end( marker );
+
+ return result;
+}
+
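+// Illustrative call sketch (hypothetical resource IDs; not part of this change).
+// Left-side code is expected to route all message boxes through this method:
+//
+//   g_pDebugger->MessageBox(IDS_DEBUG_SOME_TEXT,     // hypothetical text resource id
+//                           IDS_DEBUG_SOME_CAPTION,  // hypothetical caption resource id
+//                           MB_OKCANCEL,
+//                           TRUE,                    // displayForNonInteractive
+//                           FALSE);                  // showFileNameInTitle
+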
+// Redefine this to an error just in case code is added after this point in the file.
+#define UtilMessageBoxVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+
+#else // DACCESS_COMPILE
+void
+Debugger::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ DAC_ENUM_VTHIS();
+ SUPPORTS_DAC;
+
+ if ( flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ if (m_pMethodInfos.IsValid())
+ {
+ m_pMethodInfos->EnumMemoryRegions(flags);
+ }
+
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_pLazyData),
+ sizeof(DebuggerLazyInit));
+ }
+
+ // Needed for stack walking from an initial native context. If the debugger can find the
+ // on-disk image of clr.dll, then this is not necessary.
+ DacEnumMemoryRegion(dac_cast<TADDR>(m_rgHijackFunction), sizeof(MemoryRange)*kMaxHijackFunctions);
+}
+
+
+// This code doesn't hang out in Frame/TransitionFrame/FuncEvalFrame::EnumMemoryRegions() like it would
+// for other normal VM objects because we don't want to have code in VM directly referencing LS types.
+// Frames.h's FuncEvalFrame simply does a forward decl of DebuggerEval and gets away with it because it
+// never does anything but a cast of a TADDR.
+void
+Debugger::EnumMemoryRegionsIfFuncEvalFrame(CLRDataEnumMemoryFlags flags, Frame * pFrame)
+{
+ SUPPORTS_DAC;
+
+ if ((pFrame != NULL) && (pFrame->GetFrameType() == Frame::TYPE_FUNC_EVAL))
+ {
+ FuncEvalFrame * pFEF = dac_cast<PTR_FuncEvalFrame>(pFrame);
+ DebuggerEval * pDE = pFEF->GetDebuggerEval();
+
+ if (pDE != NULL)
+ {
+ DacEnumMemoryRegion(dac_cast<TADDR>(pDE), sizeof(DebuggerEval), true);
+
+ if (pDE->m_debuggerModule != NULL)
+ DacEnumMemoryRegion(dac_cast<TADDR>(pDE->m_debuggerModule), sizeof(DebuggerModule), true);
+ }
+ }
+}
+
+#endif // #ifdef DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+void Debugger::StartCanaryThread()
+{
+ // we need to already have the rcthread running and the pointer stored
+ _ASSERTE(m_pRCThread != NULL && g_pRCThread == m_pRCThread);
+ _ASSERTE(m_pRCThread->GetDCB() != NULL);
+ _ASSERTE(GetCanary() != NULL);
+
+ GetCanary()->Init();
+}
+#endif // DACCESS_COMPILE
+
+#endif //DEBUGGING_SUPPORTED
diff --git a/src/debug/ee/debugger.h b/src/debug/ee/debugger.h
new file mode 100644
index 0000000000..29585de31d
--- /dev/null
+++ b/src/debug/ee/debugger.h
@@ -0,0 +1,3833 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: debugger.h
+//
+
+//
+// Header file for Runtime Controller classes of the COM+ Debugging Services.
+//
+//*****************************************************************************
+
+#ifndef DEBUGGER_H_
+#define DEBUGGER_H_
+
+#include <windows.h>
+
+#include <utilcode.h>
+
+#include <metahost.h>
+
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+#define LOGGING
+#endif
+
+#include <log.h>
+
+#include "cor.h"
+#include "corpriv.h"
+#include "daccess.h"
+
+#include "common.h"
+#include "winwrap.h"
+#include "threads.h"
+#include "frames.h"
+
+#include "appdomain.hpp"
+#include "eedbginterface.h"
+#include "dbginterface.h"
+#include "corhost.h"
+
+
+#include "corjit.h"
+#include <dbgmeta.h> // <TODO>need to rip this out of here...</TODO>
+
+#include "frameinfo.h"
+
+#include "dllimportcallback.h"
+
+#include "canary.h"
+#include "inprocdac.h"
+
+#undef ASSERT
+#define CRASH(x) _ASSERTE(!x)
+#define ASSERT(x) _ASSERTE(x)
+
+
+#ifndef TRACE_MEMORY
+#define TRACE_MEMORY 0
+#endif
+
+#if TRACE_MEMORY
+#define TRACE_ALLOC(p) LOG((LF_CORDB, LL_INFO10000, \
+ "--- Allocated %x at %s:%d\n", p, __FILE__, __LINE__));
+#define TRACE_FREE(p) LOG((LF_CORDB, LL_INFO10000, \
+ "--- Freed %x at %s:%d\n", p, __FILE__, __LINE__));
+#else
+#define TRACE_ALLOC(p)
+#define TRACE_FREE(p)
+#endif
+
+typedef CUnorderedArray<void*,11> UnorderedPtrArray;
+
+/* ------------------------------------------------------------------------ *
+ * Forward class declarations
+ * ------------------------------------------------------------------------ */
+
+class DebuggerFrame;
+class DebuggerModule;
+class DebuggerModuleTable;
+class Debugger;
+class DebuggerBreakpoint;
+class DebuggerPendingFuncEvalTable;
+class DebuggerRCThread;
+class DebuggerStepper;
+class DebuggerMethodInfo;
+class DebuggerJitInfo;
+class DebuggerMethodInfoTable;
+struct DebuggerControllerPatch;
+class DebuggerEval;
+class DebuggerControllerQueue;
+class DebuggerController;
+class Crst;
+
+typedef CUnorderedArray<DebuggerControllerPatch *, 17> PATCH_UNORDERED_ARRAY;
+template<class T> void DeleteInteropSafe(T *p);
+template<class T> void DeleteInteropSafeExecutable(T *p);
+
+typedef VPTR(class Debugger) PTR_Debugger;
+typedef DPTR(struct DebuggerILToNativeMap) PTR_DebuggerILToNativeMap;
+typedef DPTR(class DebuggerMethodInfo) PTR_DebuggerMethodInfo;
+typedef VPTR(class DebuggerMethodInfoTable) PTR_DebuggerMethodInfoTable;
+typedef DPTR(class DebuggerJitInfo) PTR_DebuggerJitInfo;
+typedef DPTR(class DebuggerEval) PTR_DebuggerEval;
+typedef DPTR(struct DebuggerIPCControlBlock) PTR_DebuggerIPCControlBlock;
+
+
+/* ------------------------------------------------------------------------ *
+ * Global variables
+ * ------------------------------------------------------------------------ */
+
+GPTR_DECL(Debugger, g_pDebugger);
+GPTR_DECL(EEDebugInterface, g_pEEInterface);
+GVAL_DECL(HANDLE, g_hContinueStartupEvent);
+extern DebuggerRCThread *g_pRCThread;
+
+//---------------------------------------------------------------------------------------
+// Holder to ensure our calls to IncThreadsAtUnsafePlaces and DecThreadsAtUnsafePlaces stay balanced.
+class AtSafePlaceHolder
+{
+public:
+ AtSafePlaceHolder(Thread * pThread);
+
+ // Clear the holder.
+ ~AtSafePlaceHolder();
+
+ // True if the holder is acquired.
+ bool IsAtUnsafePlace();
+
+ // Clear the holder (call DecThreadsAtUnsafePlaces if needed)
+ void Clear();
+
+private:
+ // If this is non-null, then the holder incremented the unsafe counter and it needs
+ // to decrement it.
+ Thread * m_pThreadAtUnsafePlace;
+};
+
+
+template<BOOL COOPERATIVE, BOOL TOGGLE, BOOL IFTHREAD>
+class GCHolderEEInterface
+{
+public:
+ DEBUG_NOINLINE GCHolderEEInterface();
+ DEBUG_NOINLINE ~GCHolderEEInterface();
+};
+
+#ifndef DACCESS_COMPILE
+template<BOOL TOGGLE, BOOL IFTHREAD>
+class GCHolderEEInterface<TRUE, TOGGLE, IFTHREAD>
+{
+private:
+ bool startInCoop;
+
+public:
+ DEBUG_NOINLINE GCHolderEEInterface()
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_COOPERATIVE;
+
+ if (IFTHREAD && g_pEEInterface->GetThread() == NULL)
+ {
+ return;
+ }
+
+ startInCoop = false;
+
+ if (g_pEEInterface->IsPreemptiveGCDisabled())
+ {
+ // we're starting in COOP, no need to switch
+ startInCoop = true;
+ }
+ else
+ {
+ // we're starting in PREEMP, need to switch to COOP
+ startInCoop = false;
+ g_pEEInterface->DisablePreemptiveGC();
+ }
+ };
+
+ DEBUG_NOINLINE ~GCHolderEEInterface()
+ {
+ SCAN_SCOPE_END;
+
+ if (IFTHREAD && g_pEEInterface->GetThread() == NULL)
+ {
+ return;
+ }
+
+ _ASSERT(g_pEEInterface->IsPreemptiveGCDisabled());
+
+ if (TOGGLE)
+ {
+ // We're in COOP, toggle to PREEMPTIVE and back to COOP
+ // for synch purposes.
+ g_pEEInterface->EnablePreemptiveGC();
+ g_pEEInterface->DisablePreemptiveGC();
+
+ // If we started in PREEMPTIVE switch back
+ if (!startInCoop)
+ {
+ g_pEEInterface->EnablePreemptiveGC();
+ }
+ }
+ else
+ {
+ // If we started in PREEMPTIVE switch back
+ if (!startInCoop)
+ {
+ g_pEEInterface->EnablePreemptiveGC();
+ }
+ }
+ };
+};
+
+template<BOOL TOGGLE, BOOL IFTHREAD>
+class GCHolderEEInterface<FALSE, TOGGLE, IFTHREAD>
+{
+private:
+ bool startInCoop;
+ bool conditional;
+
+ void EnterInternal(bool bStartInCoop, bool bConditional)
+ {
+ startInCoop = bStartInCoop;
+ conditional = bConditional;
+
+ if (!conditional || IFTHREAD && g_pEEInterface->GetThread() == NULL)
+ {
+ return;
+ }
+
+ if (g_pEEInterface->IsPreemptiveGCDisabled())
+ {
+ // we're starting in COOP, we need to switch to PREEMP
+ startInCoop = true;
+ g_pEEInterface->EnablePreemptiveGC();
+ }
+ else
+ {
+ // We're starting in PREEMP, no need to switch
+ startInCoop = false;
+ }
+ }
+
+ void LeaveInternal()
+ {
+ if (!conditional || IFTHREAD && g_pEEInterface->GetThread() == NULL)
+ {
+ return;
+ }
+
+ _ASSERTE(!g_pEEInterface->IsPreemptiveGCDisabled());
+
+ if (TOGGLE)
+ {
+            // Explicitly toggle to COOP for eventing.
+ g_pEEInterface->DisablePreemptiveGC();
+
+ // If we started in PREEMPTIVE switch back to PREEMPTIVE
+ if (!startInCoop)
+ {
+ g_pEEInterface->EnablePreemptiveGC();
+ }
+ }
+ else
+ {
+ // If we started in COOP, flip back to COOP at the end of the
+ // scope, if we started in preemptive we should be fine.
+ if (startInCoop)
+ {
+ g_pEEInterface->DisablePreemptiveGC();
+ }
+ }
+ }
+
+public:
+ DEBUG_NOINLINE GCHolderEEInterface()
+ {
+ SCAN_SCOPE_BEGIN;
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+
+ this->EnterInternal(false, true);
+ }
+
+ DEBUG_NOINLINE GCHolderEEInterface(bool bConditional)
+ {
+ SCAN_SCOPE_BEGIN;
+ if (bConditional)
+ {
+ STATIC_CONTRACT_MODE_PREEMPTIVE;
+ }
+
+ this->EnterInternal(false, bConditional);
+ }
+
+ DEBUG_NOINLINE ~GCHolderEEInterface()
+ {
+ SCAN_SCOPE_END;
+
+ this->LeaveInternal();
+ };
+};
+#endif //DACCESS_COMPILE
+
+#define GCX_COOP_EEINTERFACE() \
+ GCHolderEEInterface<TRUE, FALSE, FALSE> __gcCoop_onlyOneAllowedPerScope
+
+#define GCX_PREEMP_EEINTERFACE() \
+ GCHolderEEInterface<FALSE, FALSE, FALSE> __gcCoop_onlyOneAllowedPerScope
+
+#define GCX_COOP_EEINTERFACE_TOGGLE() \
+ GCHolderEEInterface<TRUE, TRUE, FALSE> __gcCoop_onlyOneAllowedPerScope
+
+#define GCX_PREEMP_EEINTERFACE_TOGGLE() \
+ GCHolderEEInterface<FALSE, TRUE, FALSE> __gcCoop_onlyOneAllowedPerScope
+
+#define GCX_PREEMP_EEINTERFACE_TOGGLE_IFTHREAD() \
+ GCHolderEEInterface<FALSE, TRUE, TRUE> __gcCoop_onlyOneAllowedPerScope
+
+#define GCX_PREEMP_EEINTERFACE_TOGGLE_COND(cond) \
+ GCHolderEEInterface<FALSE, TRUE, FALSE> __gcCoop_onlyOneAllowedPerScope((cond))
+
+#define GCX_PREEMP_EEINTERFACE_TOGGLE_IFTHREAD_COND(cond) \
+ GCHolderEEInterface<FALSE, TRUE, TRUE> __gcCoop_onlyOneAllowedPerScope((cond))
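+
+// Illustrative usage sketch (hypothetical scope; not part of this change). The
+// holder switches the thread to the requested GC mode for the scope, optionally
+// toggling once for synchronization, and restores the starting mode on exit:
+//
+//   {
+//       GCX_COOP_EEINTERFACE();   // switch to cooperative mode if not already there
+//       ...                       // work that must run in cooperative mode
+//   }                             // starting mode is restored here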
+
+
+
+// There are still some APIs we call from the helper thread that call new.
+// These are unsafe operations, so we wrap them here. Each of these is a potential hang.
+inline DWORD UnsafeGetConfigDWORD_DontUse_(LPCWSTR name, DWORD defValue)
+{
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ return REGUTIL::GetConfigDWORD_DontUse_(name, defValue);
+}
+
+inline DWORD UnsafeGetConfigDWORD(const CLRConfig::ConfigDWORDInfo & info)
+{
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ return CLRConfig::GetConfigValue(info);
+}
+
+#define FILE_DEBUG INDEBUG(__FILE__) NOT_DEBUG(NULL)
+#define LINE_DEBUG INDEBUG(__LINE__) NOT_DEBUG(0)
+
+#define CORDBDebuggerSetUnrecoverableWin32Error(__d, __code, __w) \
+ ((__d)->UnrecoverableError(HRESULT_FROM_WIN32(GetLastError()), \
+ (__code), FILE_DEBUG, LINE_DEBUG, (__w)), \
+ HRESULT_FROM_GetLastError())
+
+#define CORDBDebuggerSetUnrecoverableError(__d, __hr, __w) \
+ (__d)->UnrecoverableError((__hr), \
+ (__hr), FILE_DEBUG, LINE_DEBUG, (__w))
+
+#define CORDBUnrecoverableError(__d) ((__d)->m_unrecoverableError == TRUE)
+
+/* ------------------------------------------------------------------------ *
+ * Helpers used for contract preconditions.
+ * ------------------------------------------------------------------------ */
+
+
+bool ThisIsHelperThreadWorker(void);
+bool ThisIsTempHelperThread();
+bool ThisIsTempHelperThread(DWORD tid);
+
+#ifdef _DEBUG
+
+// Functions can be split up into 3 categories:
+// 1.) Functions that must run on the helper thread.
+// Returns true if this is the helper thread (or the thread
+//     doing helper-thread duty).
+
+// 2.) Functions that can't run on the helper thread.
+// This is just !ThisIsHelperThread();
+
+// 3.) Functions that may or may not run on the helper thread.
+//     Note this is trivially true, but its presence means that
+// we're not case #1 or #2, so it's still valuable.
+inline bool ThisMaybeHelperThread() { return true; }
+
+#endif
+
+
+// These are methods for transferring information between a REGDISPLAY and
+// a DebuggerREGDISPLAY.
+extern void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc);
+extern void SetDebuggerREGDISPLAYFromREGDISPLAY(DebuggerREGDISPLAY* pDRD, REGDISPLAY* pRD);
+
+//
+// PUSHED_REG_ADDR gives us NULL if the register still lives in the thread's context, or it gives us the address
+// of where the register was pushed for this frame.
+//
+// This macro is used in CopyREGDISPLAY() and SetDebuggerREGDISPLAYFromREGDISPLAY(). We really should make
+// DebuggerREGDISPLAY a class with these two methods, but unfortunately, the RS has no notion of REGDISPLAY.
+inline LPVOID PushedRegAddr(REGDISPLAY* pRD, LPVOID pAddr)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#if defined(_TARGET_AMD64_)
+ if ( ((UINT_PTR)(pAddr) >= (UINT_PTR)pRD->pCurrentContextPointers) &&
+ ((UINT_PTR)(pAddr) <= ((UINT_PTR)pRD->pCurrentContextPointers + sizeof(_KNONVOLATILE_CONTEXT_POINTERS))) )
+#else
+ if ( ((UINT_PTR)(pAddr) >= (UINT_PTR)pRD->pContext) &&
+ ((UINT_PTR)(pAddr) <= ((UINT_PTR)pRD->pContext + sizeof(T_CONTEXT))) )
+#endif
+ return NULL;
+
+ // (Microsoft 2/9/07 - putting this in an else clause confuses gcc for some reason, so I've moved
+ // it to here)
+ return pAddr;
+}
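+
+// Illustrative sketch of the intended pattern (hypothetical caller; pRegSlot is
+// assumed to be the address of a register slot taken from the REGDISPLAY):
+//
+//   LPVOID pPushed = PushedRegAddr(pRD, pRegSlot);
+//   if (pPushed == NULL)
+//   {
+//       // the register still lives in the thread's context
+//   }
+//   else
+//   {
+//       // the register was pushed onto the stack for this frame
+//   }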
+
+bool HandleIPCEventWrapper(Debugger* pDebugger, DebuggerIPCEvent *e);
+
+HRESULT ValidateObject(Object *objPtr);
+
+//-----------------------------------------------------------------------------
+// Execution control needs several ways to get at the context of a thread
+// stopped in managed code (stepping, setip, func-eval).
+// We want to abstract away a few things:
+// - active: this thread is stopped at a patch
+//   - inactive: this thread was managed-suspended somewhere in jitted code
+// because of some other active thread.
+//
+// In general, execution control operations administered from the helper thread
+// can occur on any managed thread (active or inactive).
+// Intermediate triggers (eg, TriggerPatch) only occur on an active thread.
+//
+// Viewing the context in terms of Active vs. Inactive lets us abstract away
+// filter context, redirected context, and interop hijacks.
+//-----------------------------------------------------------------------------
+
+// Get the context for a thread stopped (perhaps temporarily) in managed code.
+// The process may be live or stopped.
+// This thread could be 'active' (stopped at patch) or inactive.
+// This context should always be in managed code and this context can be manipulated
+// for execution control (setip, single-step, func-eval, etc)
+// Returns NULL if not available.
+CONTEXT * GetManagedStoppedCtx(Thread * pThread);
+
+// Get the context for a thread live in or around managed code.
+// Caller guarantees this is active.
+// This ctx is just for a 'live' thread. This means that the ctx may come
+// from an M2U hijack or from a native patch.
+// Never NULL.
+CONTEXT * GetManagedLiveCtx(Thread * pThread);
+
+
+#undef UtilMessageBoxCatastrophic
+#undef UtilMessageBoxCatastrophicNonLocalized
+#undef UtilMessageBoxCatastrophicVA
+#undef UtilMessageBoxCatastrophicNonLocalizedVA
+#undef UtilMessageBox
+#undef UtilMessageBoxNonLocalized
+#undef UtilMessageBoxVA
+#undef UtilMessageBoxNonLocalizedVA
+#undef WszMessageBox
+#define UtilMessageBoxCatastrophic __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+#define UtilMessageBoxCatastrophicNonLocalized __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+#define UtilMessageBoxCatastrophicVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+#define UtilMessageBoxCatastrophicNonLocalizedVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+#define UtilMessageBox __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+#define UtilMessageBoxNonLocalized __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+#define UtilMessageBoxVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+#define UtilMessageBoxNonLocalizedVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+#define WszMessageBox __error("Use g_pDebugger->MessageBox from inside the left side of the debugger")
+
+
+/* ------------------------------------------------------------------------ *
+ * Module classes
+ * ------------------------------------------------------------------------ */
+
+// Once a module / appdomain is unloaded, all Right-side objects (such as breakpoints)
+// in that appdomain will get neutered and will thus be prevented from accessing
+// the unloaded appdomain.
+//
+// @dbgtodo jmc - This is now purely relegated to the LS. Eventually completely get rid of this
+// by moving fields off to Module or getting rid of the fields completely.
+typedef DPTR(class DebuggerModule) PTR_DebuggerModule;
+class DebuggerModule
+{
+ public:
+ DebuggerModule(Module * pRuntimeModule, DomainFile * pDomainFile, AppDomain * pAppDomain);
+
+ // Do we have any optimized code in the module?
+ // JMC-probes aren't emitted in optimized code,
+ bool HasAnyOptimizedCode();
+
+ // If the debugger updates things to allow/disallow optimized code, then we have to track that.
+ void MarkAllowedOptimizedCode();
+ void UnmarkAllowedOptimizedCode();
+
+
+ BOOL ClassLoadCallbacksEnabled(void);
+ void EnableClassLoadCallbacks(BOOL f);
+
+ AppDomain* GetAppDomain();
+
+ Module * GetRuntimeModule();
+
+
+ // <TODO> (8/12/2002)
+    // Currently we create a new DebuggerModule for each appdomain a shared
+ // module lives in. We then pretend there aren't any shared modules.
+ // This is bad. We need to move away from this.
+    // Once we stop lying, then every module will be its own PrimaryModule. :)
+ //
+ // Currently, Module* is 1:n w/ DebuggerModule.
+ // We add a notion of PrimaryModule so that:
+ // Module* is 1:1 w/ DebuggerModule::GetPrimaryModule();
+ // This should help transition towards exposing shared modules.
+ // If the Runtime module is shared, then this gives a common DM.
+ // If the runtime module is not shared, then this is an identity function.
+ //
+ // The runtime has the notion of "DomainFile", which is 1:1 with DebuggerModule
+ // and thus 1:1 with CordbModule. The CordbModule hash table on the RS now uses
+ // the DomainFile as the key instead of DebuggerModule. This is a temporary
+ // workaround to facilitate the removal of DebuggerModule.
+ // </TODO>
+ DebuggerModule * GetPrimaryModule();
+ DomainFile * GetDomainFile()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pRuntimeDomainFile;
+ }
+
+ // Called by DebuggerModuleTable to set our primary module
+ void SetPrimaryModule(DebuggerModule * pPrimary);
+
+ void SetCanChangeJitFlags(bool fCanChangeJitFlags);
+
+ private:
+ BOOL m_enableClassLoadCallbacks;
+
+ // First step in moving away from hiding shared modules.
+ DebuggerModule* m_pPrimaryModule;
+
+ PTR_Module m_pRuntimeModule;
+ PTR_DomainFile m_pRuntimeDomainFile;
+
+ AppDomain* m_pAppDomain;
+
+ bool m_fHasOptimizedCode;
+
+ void PickPrimaryModule();
+
+ // Can we change jit flags on the module?
+ // This is true during the Module creation
+ bool m_fCanChangeJitFlags;
+
+
+};
+
+/* ------------------------------------------------------------------------ *
+ * Hash to hold pending func evals by thread id
+ * ------------------------------------------------------------------------ */
+
+struct DebuggerPendingFuncEval
+{
+ FREEHASHENTRY entry;
+ PTR_Thread pThread;
+ PTR_DebuggerEval pDE;
+};
+
+typedef DPTR(struct DebuggerPendingFuncEval) PTR_DebuggerPendingFuncEval;
+
+/* ------------------------------------------------------------------------ *
+ * DebuggerRCThread class -- the Runtime Controller thread.
+ * ------------------------------------------------------------------------ */
+
+#define DRCT_CONTROL_EVENT 0
+#define DRCT_RSEA 1
+#define DRCT_FAVORAVAIL 2
+#define DRCT_COUNT_INITIAL 3
+
+#define DRCT_DEBUGGER_EVENT 3
+#define DRCT_COUNT_FINAL 4
+
+
+
+
+
+
+// The canary is used as a way to have a runtime failure for the SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE
+// contract violation.
+// Have a macro which checks the canary and then uses the Suppress macro.
+// We need this check to be a macro in order to chain to the Suppress_allocation macro.
+#define CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(pHR, pCanary) \
+ { \
+ HelperCanary * __pCanary = (pCanary); \
+ if (!__pCanary->AreLocksAvailable()) { \
+ (*pHR) = CORDBG_E_HELPER_MAY_DEADLOCK; \
+ } else { \
+ (*pHR) = S_OK; \
+ } \
+ } \
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE \
+ ; \
+
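+
+// Illustrative usage sketch (hypothetical caller with access to the helper
+// canary; not part of this change):
+//
+//   HRESULT hr = S_OK;
+//   CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(&hr, pCanary);
+//   if (FAILED(hr))
+//   {
+//       return hr;   // taking helper locks here could deadlock, so bail out
+//   }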
+
+// Mechanics for cross-thread call to helper thread (called "Favor").
+class HelperThreadFavor
+{
+ // Only let RCThread access these fields.
+ friend class DebuggerRCThread;
+
+ HelperThreadFavor();
+ // No dtor because we intentionally leak all shutdown.
+ void Init();
+
+protected:
+ // Stuff for having the helper thread do function calls for a thread
+ // that blew its stack
+ FAVORCALLBACK m_fpFavor;
+ void *m_pFavorData;
+ HANDLE m_FavorReadEvent;
+ Crst m_FavorLock;
+
+ HANDLE m_FavorAvailableEvent;
+};
+
+
+// The *LazyInit classes represent storage that the debugger doesn't need until after it has started up.
+// This is effectively an extension to the debugger class; but for perf reasons, we only
+// want to instantiate it if we're actually debugging.
+
+// Fields that are a logical extension of RCThread
+class RCThreadLazyInit
+{
+ // Only let RCThread access these fields.
+ friend class DebuggerRCThread;
+
+public:
+ RCThreadLazyInit() { }
+ ~RCThreadLazyInit() { }
+
+ void Init() { }
+protected:
+
+
+
+ HelperCanary m_Canary;
+};
+
+// Fields that are a logical extension of Debugger
+class DebuggerLazyInit
+{
+ friend class Debugger;
+public:
+ DebuggerLazyInit();
+ ~DebuggerLazyInit();
+
+protected:
+ void Init();
+
+ DebuggerPendingFuncEvalTable *m_pPendingEvals;
+
+ // The "debugger data lock" is a very small leaf lock used to protect debugger internal data structures (such
+ // as DJIs, DMIs, module table). It is a GC-unsafe-anymode lock and so it can't trigger a GC while being held.
+    // It also can't issue any callbacks into the EE or any code that it does not directly control.
+    // This is a separate lock from the larger Debugger-lock / Controller lock, which allows regions under those
+    // locks to access debugger data structures w/o blocking each other.
+ Crst m_DebuggerDataLock;
+ HANDLE m_CtrlCMutex;
+ HANDLE m_exAttachEvent;
+ HANDLE m_exUnmanagedAttachEvent;
+
+ BOOL m_DebuggerHandlingCtrlC;
+
+ // Used by MapAndBindFunctionBreakpoints. Note that this is thread-safe
+ // only b/c we access it from within the DebuggerController::Lock
+ SIZE_T_UNORDERED_ARRAY m_BPMappingDuplicates;
+
+ UnorderedPtrArray m_pMemBlobs;
+
+ // Hang RCThread fields off DebuggerLazyInit to avoid an extra pointer.
+ RCThreadLazyInit m_RCThread;
+};
+typedef DPTR(DebuggerLazyInit) PTR_DebuggerLazyInit;
+
+class DebuggerRCThread
+{
+public:
+ DebuggerRCThread(Debugger * pDebugger);
+ virtual ~DebuggerRCThread();
+ void CloseIPCHandles();
+
+ //
+ // You create a new instance of this class, call Init() to set it up,
+    // then call Start() to start processing events. Stop() terminates the
+    // thread, and deleting the instance cleans up all the handles and such.
+ //
+ HRESULT Init(void);
+ HRESULT Start(void);
+ HRESULT AsyncStop(void);
+
+ //
+ // These are used by this thread to send IPC events to the Debugger
+ // Interface side.
+ //
+ DebuggerIPCEvent* GetIPCEventSendBuffer()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+#ifdef LOGGING
+ if(IsRCThreadReady()) {
+ LOG((LF_CORDB, LL_EVERYTHING, "RCThread is ready\n"));
+ }
+#endif
+
+ _ASSERTE(m_pDCB != NULL);
+ // In case this turns into a continuation event
+ GetRCThreadSendBuffer()->next = NULL;
+ LOG((LF_CORDB,LL_EVERYTHING, "GIPCESBuffer: got event 0x%x\n", GetRCThreadSendBuffer()));
+
+ return GetRCThreadSendBuffer();
+ }
+
+ DebuggerIPCEvent *GetIPCEventSendBufferContinuation(
+ DebuggerIPCEvent *eventCur)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(eventCur != NULL);
+ PRECONDITION(eventCur->next == NULL);
+ }
+ CONTRACTL_END;
+
+        DebuggerIPCEvent *dipce = (DebuggerIPCEvent *) new (nothrow) BYTE [CorDBIPC_BUFFER_SIZE];
+
+        LOG((LF_CORDB,LL_INFO1000000, "About to GIPCESBC 0x%x\n",dipce));
+
+        if (dipce != NULL)
+        {
+            // Only touch the new buffer if the allocation actually succeeded.
+            dipce->next = NULL;
+            eventCur->next = dipce;
+        }
+#ifdef _DEBUG
+ else
+ {
+ _ASSERTE( !"GetIPCEventSendBufferContinuation failed to allocate mem!" );
+ }
+#endif //_DEBUG
+
+ return dipce;
+ }
+
+    // Send an IPCEvent once we're ready for sending. This should be done in between
+ // SENDIPCEVENT_BEGIN & SENDIPCEVENT_END. See definition of SENDIPCEVENT_BEGIN
+ // for usage pattern
+ HRESULT SendIPCEvent();
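+
+    // Illustrative sketch of the send pattern as used by the Debugger methods in
+    // debugger.cpp (event type and setup are placeholders):
+    //
+    //   SENDIPCEVENT_BEGIN(this, pThread);
+    //   if (CORDebuggerAttached())
+    //   {
+    //       DebuggerIPCEvent * ipce = m_pRCThread->GetIPCEventSendBuffer();
+    //       // ... InitIPCEvent(ipce, DB_IPCE_..., ...) for the event being raised ...
+    //       m_pRCThread->SendIPCEvent();
+    //       TrapAllRuntimeThreads();
+    //   }
+    //   SENDIPCEVENT_END;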
+
+ HRESULT EnsureRuntimeOffsetsInit(IpcTarget i); // helper function for SendIPCEvent
+ void NeedRuntimeOffsetsReInit(IpcTarget i);
+
+ DebuggerIPCEvent* GetIPCEventReceiveBuffer()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+ _ASSERTE(m_pDCB != NULL);
+
+ return GetRCThreadReceiveBuffer();
+ }
+
+ HRESULT SendIPCReply();
+
+ //
+ // Handle Favors - get the Helper thread to do a function call for us
+ // because our thread can't (eg, we don't have the stack space)
+ // DoFavor will call (*fp)(pData) and block until fp returns.
+ // pData can store parameters, return value, and a this ptr (if we
+ // need to call a member function)
+ //
+ void DoFavor(FAVORCALLBACK fp, void * pData);
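+
+    // Illustrative sketch (hypothetical callback; assumes FAVORCALLBACK is a
+    // void (*)(void *) callback, and the calling thread blocks until it returns):
+    //
+    //   static void StackHeavyWorkFavor(void * pData) { /* runs on the helper thread */ }
+    //   ...
+    //   g_pRCThread->DoFavor(StackHeavyWorkFavor, &args);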
+
+ //
+    // Convenience routines
+ //
+ PTR_DebuggerIPCControlBlock GetDCB()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ // This may be called before we init or after we shutdown.
+
+ return m_pDCB;
+ }
+
+ void WatchForStragglers(void);
+
+ HRESULT SetupRuntimeOffsets(DebuggerIPCControlBlock *pDCB);
+
+ bool HandleRSEA();
+ void MainLoop();
+ void TemporaryHelperThreadMainLoop();
+
+ HANDLE GetHelperThreadCanGoEvent(void) {LIMITED_METHOD_CONTRACT; return m_helperThreadCanGoEvent; }
+
+ void EarlyHelperThreadDeath(void);
+
+ void RightSideDetach(void);
+
+ //
+ //
+ //
+ void ThreadProc(void);
+ static DWORD WINAPI ThreadProcStatic(LPVOID parameter);
+ static DWORD WINAPI ThreadProcRemote(LPVOID parameter);
+
+ DWORD GetRCThreadId()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pDCB->m_helperThreadId;
+ }
+
+ // Return true if the Helper Thread up & initialized.
+ bool IsRCThreadReady();
+
+ HRESULT ReDaclEvents(PSECURITY_DESCRIPTOR securityDescriptor);
+private:
+
+ // The transport based communication protocol keeps the send and receive buffers outside of the DCB
+ // to keep the DCB size down (since we send it over the wire).
+ DebuggerIPCEvent * GetRCThreadReceiveBuffer()
+ {
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(useTransport)
+ {
+ return reinterpret_cast<DebuggerIPCEvent *>(&m_receiveBuffer[0]);
+ }
+ else
+ {
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+ return reinterpret_cast<DebuggerIPCEvent *>(&m_pDCB->m_receiveBuffer[0]);
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif
+ }
+
+ // The transport based communication protocol keeps the send and receive buffers outside of the DCB
+ // to keep the DCB size down (since we send it over the wire).
+ DebuggerIPCEvent * GetRCThreadSendBuffer()
+ {
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(useTransport)
+ {
+ return reinterpret_cast<DebuggerIPCEvent *>(&m_sendBuffer[0]);
+ }
+ else
+ {
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+ return reinterpret_cast<DebuggerIPCEvent *>(&m_pDCB->m_sendBuffer[0]);
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+ }
+
+ FAVORCALLBACK GetFavorFnPtr() { return m_favorData.m_fpFavor; }
+ void * GetFavorData() { return m_favorData.m_pFavorData; }
+
+ void SetFavorFnPtr(FAVORCALLBACK fp, void * pData)
+ {
+ m_favorData.m_fpFavor = fp;
+ m_favorData.m_pFavorData = pData;
+ }
+ Crst * GetFavorLock() { return &m_favorData.m_FavorLock; }
+
+ HANDLE GetFavorReadEvent() { return m_favorData.m_FavorReadEvent; }
+ HANDLE GetFavorAvailableEvent() { return m_favorData.m_FavorAvailableEvent; }
+
+ HelperThreadFavor m_favorData;
+
+
+ HelperCanary * GetCanary() { return &GetLazyData()->m_Canary; }
+
+
+ friend class Debugger;
+ HRESULT VerifySecurityOnRSCreatedEvents(HANDLE sse, HANDLE lsea, HANDLE lser);
+ Debugger* m_debugger;
+
+ // IPC_TARGET_* define default targets - if we ever want to do
+ // multiple right sides, we'll have to switch to a OUTOFPROC + iTargetProcess scheme
+ PTR_DebuggerIPCControlBlock m_pDCB;
+
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ // These buffers move here out of the DebuggerIPCControlBlock since the block is not shared memory when
+ // using the transport, but we do send its contents over the wire (and these buffers would greatly impact
+ // the number of bytes sent without being useful in any way).
+ BYTE m_receiveBuffer[CorDBIPC_BUFFER_SIZE];
+ BYTE m_sendBuffer[CorDBIPC_BUFFER_SIZE];
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ HANDLE m_thread;
+ bool m_run;
+
+ HANDLE m_threadControlEvent;
+ HANDLE m_helperThreadCanGoEvent;
+ bool m_rgfInitRuntimeOffsets[IPC_TARGET_COUNT];
+ bool m_fDetachRightSide;
+
+ RCThreadLazyInit * GetLazyData();
+#ifdef _DEBUG
+ // Tracking to ensure that the helper thread only calls New() on the interop-safe heap.
+    // We need a very light-weight way to track the helper b/c we need to check every time somebody
+ // calls operator new, which may occur during shutdown paths.
+ static EEThreadId s_DbgHelperThreadId;
+
+ friend void AssertAllocationAllowed();
+
+public:
+ // The OS ThreadId of the helper as determined from the CreateThread call.
+ DWORD m_DbgHelperThreadOSTid;
+private:
+#endif
+
+};
+
+typedef DPTR(DebuggerRCThread) PTR_DebuggerRCThread;
+
+/* ------------------------------------------------------------------------ *
+ * Debugger Method Info struct and hash table
+ * ------------------------------------------------------------------------ */
+
+// class DebuggerMethodInfo: Struct to hold all the information
+// necessary for a given function.
+//
+// m_module, m_token: Method that this DMI applies to
+//
+const bool bOriginalToInstrumented = true;
+const bool bInstrumentedToOriginal = false;
+
+class DebuggerMethodInfo
+{
+ // This is the most recent version of the function based on the latest update and is
+ // set in UpdateFunction. When a function is jitted, the version is copied from here
+    // and stored in the corresponding DebuggerJitInfo structure so we can always know the
+ // version of a particular jitted function.
+ SIZE_T m_currentEnCVersion;
+
+public:
+ PTR_Module m_module;
+ mdMethodDef m_token;
+
+ PTR_DebuggerMethodInfo m_prevMethodInfo;
+ PTR_DebuggerMethodInfo m_nextMethodInfo;
+
+
+ // Enumerate DJIs
+ // Expected usage:
+ // DMI.InitDJIIterator(&it);
+ // while(!it.IsAtEnd()) {
+ // f(it.Current()); it.Next();
+ // }
+ class DJIIterator
+ {
+ friend class DebuggerMethodInfo;
+
+ DebuggerJitInfo* m_pCurrent;
+ Module* m_pLoaderModuleFilter;
+ public:
+ DJIIterator();
+
+ bool IsAtEnd();
+ DebuggerJitInfo * Current();
+ void Next(BOOL fFirst = FALSE);
+
+ };
+
+ // Ensure the DJI cache is completely up to date. (This is heavy weight).
+ void CreateDJIsForNativeBlobs(AppDomain * pAppDomain, Module * pModuleFilter = NULL);
+
+    // Get an iterator for all native blobs (accounts for Generics, EnC, + prejitting).
+ // Must be stopped when we do this. This could be heavy weight.
+ // This will call CreateDJIsForNativeBlobs() to ensure we have all DJIs available.
+ // You may optionally pass pLoaderModuleFilter to restrict the DJIs iterated to
+ // exist only on MethodDescs whose loader module matches the filter (pass NULL not
+ // to filter by loader module).
+ void IterateAllDJIs(AppDomain * pAppDomain, Module * pLoaderModuleFilter, DJIIterator * pEnum);
+
+private:
+    // The linked list of JITs of this version of the method. This will ALWAYS
+ // contain one element except for code in generic classes or generic methods,
+ // which may get JITted more than once under different type instantiations.
+ //
+ // We find the appropriate JitInfo by searching the list (nearly always this
+ // will return the first element of course).
+ //
+ // The JitInfos contain back pointers to this MethodInfo. They should never be associated
+ // with any other MethodInfo.
+ //
+ // USE ACCESSOR FUNCTION GetLatestJitInfo(), as it does lazy init of this field.
+ //
+
+ PTR_DebuggerJitInfo m_latestJitInfo;
+
+public:
+
+ PTR_DebuggerJitInfo GetLatestJitInfo(MethodDesc *fd);
+
+ DebuggerJitInfo * GetLatestJitInfo_NoCreate();
+
+
+ // Find the DJI corresponding to the specified MD and native start address.
+ DebuggerJitInfo * FindJitInfo(MethodDesc * pMD, TADDR addrNativeStartAddr);
+
+ // Creating the Jit-infos.
+ DebuggerJitInfo *FindOrCreateInitAndAddJitInfo(MethodDesc* fd);
+ DebuggerJitInfo *CreateInitAndAddJitInfo(MethodDesc* fd, TADDR startAddr);
+
+
+ void DeleteJitInfo(DebuggerJitInfo *dji);
+ void DeleteJitInfoList(void);
+
+ // Return true iff this has been jitted.
+ // Since we can create DMIs freely, a DMI's existence doesn't mean that the method was jitted.
+ bool HasJitInfos();
+
+ // Return true iff this has been EnCed since the last time the function was jitted.
+ bool HasMoreRecentEnCVersion();
+
+
+    // Return true iff this is a JMC function, else false.
+ bool IsJMCFunction();
+ void SetJMCStatus(bool fStatus);
+
+
+ DebuggerMethodInfo(Module *module, mdMethodDef token);
+ ~DebuggerMethodInfo();
+
+ // A profiler can remap the IL. We track the "instrumented" IL map here.
+ void SetInstrumentedILMap(COR_IL_MAP * pMap, SIZE_T cEntries);
+ bool HasInstrumentedILMap() {return m_fHasInstrumentedILMap; }
+
+ // TranslateToInstIL will take offOrig, and translate it to the
+ // correct IL offset if this code happens to be instrumented
+ ULONG32 TranslateToInstIL(const InstrumentedILOffsetMapping * pMapping, ULONG32 offOrig, bool fOrigToInst);
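+
+ // Illustrative sketch (not part of this header); pDMI, pMapping, and offOriginal are
+ // hypothetical, and the direction constants are declared near the top of this file:
+ //     ULONG32 offInst = pDMI->TranslateToInstIL(pMapping, offOriginal,
+ //                                               bOriginalToInstrumented);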
+
+
+ // We don't always have a debugger module. (Ex: we're tracking debug info,
+ // but no debugger is attached.) So this may return NULL a lot.
+ // If we can, we should use the RuntimeModule whenever possible.
+ DebuggerModule* GetPrimaryModule();
+
+ // We always have a runtime module.
+ Module * GetRuntimeModule();
+
+ // Set the latest EnC version number for this method
+ // This doesn't mean we have a DJI for this version yet.
+ void SetCurrentEnCVersion(SIZE_T currentEnCVersion)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(currentEnCVersion >= CorDB_DEFAULT_ENC_FUNCTION_VERSION);
+ m_currentEnCVersion = currentEnCVersion;
+ }
+
+ SIZE_T GetCurrentEnCVersion()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+ return m_currentEnCVersion;
+ }
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+protected:
+ // JMC info. Each method can have its own JMC setting.
+ bool m_fJMCStatus;
+
+ // "Instrumented" IL map set by the profiler.
+ // @dbgtodo execution control - remove this when we do execution control from out-of-proc
+ bool m_fHasInstrumentedILMap;
+};
+
+
+// ------------------------------------------------------------------------ *
+// DebuggerHeap class
+// For interop debugging, we need a heap that:
+// - does not take any outside locks
+// - returns memory which can be executed.
+// ------------------------------------------------------------------------ *
+
+#ifdef FEATURE_INTEROP_DEBUGGING
+ #define USE_INTEROPSAFE_HEAP
+#endif
+
+class DebuggerHeap
+{
+public:
+ DebuggerHeap();
+ ~DebuggerHeap();
+
+ bool IsInit();
+ void Destroy();
+ HRESULT Init(BOOL fExecutable);
+
+ void *Alloc(DWORD size);
+ void *Realloc(void *pMem, DWORD newSize, DWORD oldSize);
+ void Free(void *pMem);
+
+
+protected:
+#ifdef USE_INTEROPSAFE_HEAP
+ HANDLE m_hHeap;
+#endif
+};
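+
+// Illustrative sketch (not part of this header) of the expected DebuggerHeap call
+// pattern; cb is a hypothetical size and error handling is elided:
+//     DebuggerHeap heap;
+//     if (SUCCEEDED(heap.Init(FALSE /* not executable */)) && heap.IsInit())
+//     {
+//         void * p = heap.Alloc(cb);
+//         // ... use p ...
+//         heap.Free(p);
+//     }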
+
+class DebuggerJitInfo;
+
+#if defined(WIN64EXCEPTIONS)
+const int PARENT_METHOD_INDEX = -1;
+#endif // WIN64EXCEPTIONS
+
+class CodeRegionInfo
+{
+public:
+ CodeRegionInfo() :
+ m_addrOfHotCode(NULL),
+ m_addrOfColdCode(NULL),
+ m_sizeOfHotCode(0),
+ m_sizeOfColdCode(0)
+ {
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+ }
+
+ static CodeRegionInfo GetCodeRegionInfo(DebuggerJitInfo * dji,
+ MethodDesc * md = NULL,
+ PTR_CORDB_ADDRESS_TYPE addr = PTR_NULL);
+
+ // Fills in the CodeRegionInfo fields from the start address.
+ void InitializeFromStartAddress(PCODE addr)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ }
+ CONTRACTL_END;
+
+ m_addrOfHotCode = addr;
+ g_pEEInterface->GetMethodRegionInfo(addr,
+ &m_addrOfColdCode,
+ (size_t *) &m_sizeOfHotCode,
+ (size_t *) &m_sizeOfColdCode);
+ }
+
+ // Converts an offset within a method to a code address
+ PCODE OffsetToAddress(SIZE_T offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_addrOfHotCode != NULL)
+ {
+ if (offset < m_sizeOfHotCode)
+ {
+ return m_addrOfHotCode + offset;
+ }
+ else
+ {
+ _ASSERTE(m_addrOfColdCode);
+ _ASSERTE(offset <= m_sizeOfHotCode + m_sizeOfColdCode);
+
+ return m_addrOfColdCode + (offset - m_sizeOfHotCode);
+ }
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+
+ // Converts a code address to an offset within the method
+ SIZE_T AddressToOffset(const BYTE *addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ PCODE address = (PCODE)addr;
+
+ if ((address >= m_addrOfHotCode) &&
+ (address < m_addrOfHotCode + m_sizeOfHotCode))
+ {
+ return address - m_addrOfHotCode;
+ }
+ else if ((address >= m_addrOfColdCode) &&
+ (address < m_addrOfColdCode + m_sizeOfColdCode))
+ {
+ return address - m_addrOfColdCode + m_sizeOfHotCode;
+ }
+
+ _ASSERTE(!"addressToOffset called with invalid address");
+ return NULL;
+ }
+
+ // Determines whether the address lies within the method
+ bool IsMethodAddress(const BYTE *addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ PCODE address = (PCODE)addr;
+ return (((address >= m_addrOfHotCode) &&
+ (address < m_addrOfHotCode + m_sizeOfHotCode)) ||
+ ((address >= m_addrOfColdCode) &&
+ (address < m_addrOfColdCode + m_sizeOfColdCode)));
+ }
+
+ // Determines whether the offset is in the hot section
+ bool IsOffsetHot(SIZE_T offset)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (offset < m_sizeOfHotCode);
+ }
+
+ PCODE getAddrOfHotCode() {LIMITED_METHOD_DAC_CONTRACT; return m_addrOfHotCode;}
+ PCODE getAddrOfColdCode() {LIMITED_METHOD_DAC_CONTRACT; return m_addrOfColdCode;}
+ SIZE_T getSizeOfHotCode() {LIMITED_METHOD_DAC_CONTRACT; return m_sizeOfHotCode;}
+ SIZE_T getSizeOfColdCode() {LIMITED_METHOD_DAC_CONTRACT; return m_sizeOfColdCode;}
+ SIZE_T getSizeOfTotalCode(){LIMITED_METHOD_DAC_CONTRACT; return m_sizeOfHotCode + m_sizeOfColdCode; }
+
+private:
+
+ PCODE m_addrOfHotCode;
+ PCODE m_addrOfColdCode;
+ SIZE_T m_sizeOfHotCode;
+ SIZE_T m_sizeOfColdCode;
+};
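+
+// Worked example for the hot/cold mapping above (values are hypothetical): with
+// m_sizeOfHotCode == 0x40, OffsetToAddress(0x10) yields m_addrOfHotCode + 0x10,
+// while OffsetToAddress(0x50) yields m_addrOfColdCode + 0x10 (0x50 - 0x40).
+// AddressToOffset inverts both mappings.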
+
+/* ------------------------------------------------------------------------ *
+ * Debugger JIT Info struct
+ * ------------------------------------------------------------------------ */
+
+// class DebuggerJitInfo: Struct to hold all the JIT information
+// necessary for a given function.
+// - DJIs are 1:1 w/ native code blobs. They're almost 1:1 w/ native MethodDescs,
+// except that a MethodDesc only refers to the most recent EnC version of a method.
+// - If 2 DJIs are different, they refer to different code-blobs.
+// - DJIs are lazily created, and so you can't safely enumerate them b/c
+// you can't rely on whether they're created or not.
+
+
+//
+// MethodDesc* m_fd: MethodDesc of the method that this DJI applies to
+//
+// CORDB_ADDRESS m_addrOfCode: Address of the code. This will be read by
+// the right side (via ReadProcessMemory) to grab the actual native start
+// address of the jitted method.
+//
+// SIZE_T m_sizeOfCode: Pseudo-private variable: use the GetSizeOfCode
+// method to get this value.
+//
+// bool m_jitComplete: Set to true once JITComplete has been called.
+//
+// DebuggerILToNativeMap* m_sequenceMap: This is the sequence map, which
+// is actually a collection of IL-Native pairs, where each IL corresponds
+// to a line of source code. Each pair is referred to as a sequence map point.
+//
+// SIZE_T m_lastIL: last nonEPILOG instruction
+//
+// unsigned int m_sequenceMapCount: Count of the DebuggerILToNativeMaps
+// in m_sequenceMap.
+//
+// bool m_sequenceMapSorted: Set to true once m_sequenceMap is sorted
+// into ascending IL order (Debugger::setBoundaries, SortMap).
+//
+
+class DebuggerJitInfo
+{
+public:
+ PTR_MethodDesc m_fd;
+
+ // The loader module is used to control the lifetime of the DebuggerJitInfo. Ideally, we would refactor the code to use LoaderAllocator here
+ // instead, because that is what the VM actually uses to track the lifetime. It would make the debugger interface less chatty.
+ PTR_Module m_pLoaderModule;
+
+ bool m_jitComplete;
+
+#ifdef EnC_SUPPORTED
+ // If this is true, then we've plastered the method with DebuggerEncBreakpoints
+ // and the method has been EnC'd
+ bool m_encBreakpointsApplied;
+#endif //EnC_SUPPORTED
+
+ PTR_DebuggerMethodInfo m_methodInfo;
+
+ CORDB_ADDRESS m_addrOfCode;
+ SIZE_T m_sizeOfCode;
+
+ CodeRegionInfo m_codeRegionInfo;
+
+ PTR_DebuggerJitInfo m_prevJitInfo;
+ PTR_DebuggerJitInfo m_nextJitInfo;
+
+protected:
+ // The jit maps are lazy-initialized.
+ // They are always sorted.
+ ULONG m_lastIL;
+ PTR_DebuggerILToNativeMap m_sequenceMap;
+ unsigned int m_sequenceMapCount;
+ PTR_DebuggerILToNativeMap m_callsiteMap;
+ unsigned int m_callsiteMapCount;
+ bool m_sequenceMapSorted;
+
+ PTR_NativeVarInfo m_varNativeInfo;
+ unsigned int m_varNativeInfoCount;
+
+ bool m_fAttemptInit;
+
+#ifndef DACCESS_COMPILE
+ void LazyInitBounds();
+#else
+ void LazyInitBounds() { LIMITED_METHOD_DAC_CONTRACT; }
+#endif
+
+public:
+ unsigned int GetSequenceMapCount()
+ {
+ SUPPORTS_DAC;
+
+ LazyInitBounds();
+ return m_sequenceMapCount;
+ }
+
+ //@todo: this method could return NULL, but some callers are not handling the case
+ PTR_DebuggerILToNativeMap GetSequenceMap()
+ {
+ SUPPORTS_DAC;
+
+ LazyInitBounds();
+ return m_sequenceMap;
+ }
+
+ unsigned int GetCallsiteMapCount()
+ {
+ SUPPORTS_DAC;
+
+ LazyInitBounds();
+ return m_callsiteMapCount;
+ }
+
+ PTR_DebuggerILToNativeMap GetCallSiteMap()
+ {
+ SUPPORTS_DAC;
+
+ LazyInitBounds();
+ return m_callsiteMap;
+ }
+
+ PTR_NativeVarInfo GetVarNativeInfo()
+ {
+ SUPPORTS_DAC;
+
+ LazyInitBounds();
+ return m_varNativeInfo;
+ }
+
+ unsigned int GetVarNativeInfoCount()
+ {
+ SUPPORTS_DAC;
+
+ LazyInitBounds();
+ return m_varNativeInfoCount;
+ }
+
+
+ // The version number of this jitted code
+ SIZE_T m_encVersion;
+
+#if defined(WIN64EXCEPTIONS)
+ DWORD *m_rgFunclet;
+ int m_funcletCount;
+#endif // WIN64EXCEPTIONS
+
+#ifndef DACCESS_COMPILE
+
+ DebuggerJitInfo(DebuggerMethodInfo *minfo, MethodDesc *fd);
+ ~DebuggerJitInfo();
+
+#endif // #ifndef DACCESS_COMPILE
+
+ class ILToNativeOffsetIterator;
+
+ // Usage of ILToNativeOffsetIterator:
+ //
+ // ILToNativeOffsetIterator it;
+ // dji->InitILToNativeOffsetIterator(&it, ilOffset);
+ // while (!it.IsAtEnd())
+ // {
+ // nativeOffset = it.Current(&fExact);
+ // it.Next();
+ // }
+ struct ILOffset
+ {
+ friend class DebuggerJitInfo;
+ friend class DebuggerJitInfo::ILToNativeOffsetIterator;
+
+ private:
+ SIZE_T m_ilOffset;
+#ifdef WIN64EXCEPTIONS
+ int m_funcletIndex;
+#endif
+ };
+
+ struct NativeOffset
+ {
+ friend class DebuggerJitInfo;
+ friend class DebuggerJitInfo::ILToNativeOffsetIterator;
+
+ private:
+ SIZE_T m_nativeOffset;
+ BOOL m_fExact;
+ };
+
+ class ILToNativeOffsetIterator
+ {
+ friend class DebuggerJitInfo;
+
+ public:
+ ILToNativeOffsetIterator();
+
+ bool IsAtEnd();
+ SIZE_T Current(BOOL* pfExact);
+ SIZE_T CurrentAssertOnlyOne(BOOL* pfExact);
+ void Next();
+
+ private:
+ void Init(DebuggerJitInfo* dji, SIZE_T ilOffset);
+
+ DebuggerJitInfo* m_dji;
+ ILOffset m_currentILOffset;
+ NativeOffset m_currentNativeOffset;
+ };
+
+ void InitILToNativeOffsetIterator(ILToNativeOffsetIterator &it, SIZE_T ilOffset);
+
+ DebuggerILToNativeMap *MapILOffsetToMapEntry(SIZE_T ilOffset, BOOL *exact=NULL, BOOL fWantFirst = TRUE);
+ void MapILRangeToMapEntryRange(SIZE_T ilStartOffset, SIZE_T ilEndOffset,
+ DebuggerILToNativeMap **start,
+ DebuggerILToNativeMap **end);
+ NativeOffset MapILOffsetToNative(ILOffset ilOffset);
+
+ // MapSpecialToNative maps a CorDebugMappingResult to a native
+ // offset so that we can get the address of the prolog & epilog. The 'which'
+ // argument determines which epilog or prolog, if there's more than one.
+ SIZE_T MapSpecialToNative(CorDebugMappingResult mapping,
+ SIZE_T which,
+ BOOL *pfAccurate);
+#if defined(WIN64EXCEPTIONS)
+ void MapSpecialToNative(int funcletIndex, DWORD* pPrologEndOffset, DWORD* pEpilogStartOffset);
+ SIZE_T MapILOffsetToNativeForSetIP(SIZE_T offsetILTo, int funcletIndexFrom, EHRangeTree* pEHRT, BOOL* pExact);
+#endif // WIN64EXCEPTIONS
+
+ // MapNativeOffsetToIL takes a given nativeOffset and maps it back
+ // to the corresponding IL offset, which it returns. If the mapping indicates
+ // that the native offset corresponds to a special region of code (for
+ // example, the epilog), then the return value is as specified by
+ // ICorDebugILFrame::GetIP (see cordebug.idl).
+ DWORD MapNativeOffsetToIL(SIZE_T nativeOffsetToMap,
+ CorDebugMappingResult *mapping,
+ DWORD *which,
+ BOOL skipPrologs=FALSE);
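+
+ // Illustrative sketch (not part of this header); pDJI and nativeOffset are hypothetical:
+ //     CorDebugMappingResult mapping;
+ //     DWORD which;
+ //     DWORD ilOffset = pDJI->MapNativeOffsetToIL(nativeOffset, &mapping, &which);
+ //     // If mapping is not MAPPING_EXACT or MAPPING_APPROXIMATE, ilOffset describes a
+ //     // special region (prolog, epilog, no-mapping) per ICorDebugILFrame::GetIP.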
+
+ // If a method has multiple copies of code (because of EnC or code-pitching),
+ // this returns the DJI corresponding to 'pbAddr'
+ DebuggerJitInfo *GetJitInfoByAddress(const BYTE *pbAddr );
+
+ void Init(TADDR newAddress);
+
+#if defined(WIN64EXCEPTIONS)
+ enum GetFuncletIndexMode
+ {
+ GFIM_BYOFFSET,
+ GFIM_BYADDRESS,
+ };
+
+ void InitFuncletAddress();
+ DWORD GetFuncletOffsetByIndex(int index);
+ int GetFuncletIndex(CORDB_ADDRESS offset, GetFuncletIndexMode mode);
+ int GetFuncletCount() {return m_funcletCount;}
+#endif // WIN64EXCEPTIONS
+
+ void SetVars(ULONG32 cVars, ICorDebugInfo::NativeVarInfo *pVars);
+ void SetBoundaries(ULONG32 cMap, ICorDebugInfo::OffsetMapping *pMap);
+
+ ICorDebugInfo::SourceTypes GetSrcTypeFromILOffset(SIZE_T ilOffset);
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+
+ // Debug support
+ CHECK Check() const;
+ CHECK Invariant() const;
+};
+
+#if !defined(DACCESS_COMPILE)
+// @dbgtodo Microsoft inspection: get rid of this class when IPC events are eliminated. It's been copied to
+// dacdbistructures
+/*
+ * class MapSortIL: A template class that will sort an array of DebuggerILToNativeMap by IL offset,
+ * with special ordering for PROLOG, CALL_INSTRUCTION, NO_MAPPING, and EPILOG entries.
+ * This class is intended to be instantiated on the stack / in temporary storage, and used to reorder the sequence map.
+ */
+class MapSortIL : public CQuickSort<DebuggerILToNativeMap>
+{
+ public:
+ //Constructor
+ MapSortIL(DebuggerILToNativeMap *map,
+ int count)
+ : CQuickSort<DebuggerILToNativeMap>(map, count) {}
+
+ inline int CompareInternal(DebuggerILToNativeMap *first,
+ DebuggerILToNativeMap *second)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (first->nativeStartOffset == second->nativeStartOffset)
+ return 0;
+ else if (first->nativeStartOffset < second->nativeStartOffset)
+ return -1;
+ else
+ return 1;
+ }
+
+ //Comparison operator
+ int Compare(DebuggerILToNativeMap *first,
+ DebuggerILToNativeMap *second)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ const DWORD call_inst = (DWORD)ICorDebugInfo::CALL_INSTRUCTION;
+
+ //PROLOGs go first
+ if (first->ilOffset == (ULONG) ICorDebugInfo::PROLOG
+ && second->ilOffset == (ULONG) ICorDebugInfo::PROLOG)
+ {
+ return CompareInternal(first, second);
+ } else if (first->ilOffset == (ULONG) ICorDebugInfo::PROLOG)
+ {
+ return -1;
+ } else if (second->ilOffset == (ULONG) ICorDebugInfo::PROLOG)
+ {
+ return 1;
+ }
+ // call_instruction goes at the very very end of the table.
+ else if ((first->source & call_inst) == call_inst
+ && (second->source & call_inst) == call_inst)
+ {
+ return CompareInternal(first, second);
+ } else if ((first->source & call_inst) == call_inst)
+ {
+ return 1;
+ } else if ((second->source & call_inst) == call_inst)
+ {
+ return -1;
+ }
+ //NO_MAPPING go last
+ else if (first->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING
+ && second->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING)
+ {
+ return CompareInternal(first, second);
+ } else if (first->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING)
+ {
+ return 1;
+ } else if (second->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING)
+ {
+ return -1;
+ }
+ //EPILOGs go next-to-last
+ else if (first->ilOffset == (ULONG) ICorDebugInfo::EPILOG
+ && second->ilOffset == (ULONG) ICorDebugInfo::EPILOG)
+ {
+ return CompareInternal(first, second);
+ } else if (first->ilOffset == (ULONG) ICorDebugInfo::EPILOG)
+ {
+ return 1;
+ } else if (second->ilOffset == (ULONG) ICorDebugInfo::EPILOG)
+ {
+ return -1;
+ }
+ //normal offsets compared otherwise
+ else if (first->ilOffset < second->ilOffset)
+ return -1;
+ else if (first->ilOffset == second->ilOffset)
+ return CompareInternal(first, second);
+ else
+ return 1;
+ }
+};
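+
+// Illustrative sketch (not part of this header), assuming CQuickSort exposes a Sort()
+// entry point; map and count are hypothetical:
+//     MapSortIL sorter(map, count);
+//     sorter.Sort();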
+
+/*
+ * class MapSortNative: A template class that will sort an array of DebuggerILToNativeMap by the nativeStartOffset field.
+ * This class is intended to be instantiated on the stack / in temporary storage, and used to reorder the sequence map.
+ */
+class MapSortNative : public CQuickSort<DebuggerILToNativeMap>
+{
+ public:
+ //Constructor
+ MapSortNative(DebuggerILToNativeMap *map,
+ int count)
+ : CQuickSort<DebuggerILToNativeMap>(map, count)
+ {
+ WRAPPER_NO_CONTRACT;
+ }
+
+
+ //Returns -1,0,or 1 if first's nativeStartOffset is less than, equal to, or greater than second's
+ int Compare(DebuggerILToNativeMap *first,
+ DebuggerILToNativeMap *second)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (first->nativeStartOffset < second->nativeStartOffset)
+ return -1;
+ else if (first->nativeStartOffset == second->nativeStartOffset)
+ return 0;
+ else
+ return 1;
+ }
+};
+#endif //!DACCESS_COMPILE
+
+/* ------------------------------------------------------------------------ *
+ * Import flares from the assembly file.
+ * We rely on flares having unique addresses, and so we need to keep them
+ * from getting folded by the linker (since they are identical code).
+ * ------------------------------------------------------------------------ */
+
+extern "C" void __stdcall SignalHijackStartedFlare(void);
+extern "C" void __stdcall ExceptionForRuntimeHandoffStartFlare(void);
+extern "C" void __stdcall ExceptionForRuntimeHandoffCompleteFlare(void);
+extern "C" void __stdcall SignalHijackCompleteFlare(void);
+extern "C" void __stdcall ExceptionNotForRuntimeFlare(void);
+extern "C" void __stdcall NotifyRightSideOfSyncCompleteFlare(void);
+extern "C" void __stdcall NotifySecondChanceReadyForDataFlare(void);
+
+/* ------------------------------------------------------------------------ *
+ * Debugger class
+ * ------------------------------------------------------------------------ */
+
+
+// Forward declare some parameter marshalling structs
+struct ShouldAttachDebuggerParams;
+struct EnsureDebuggerAttachedParams;
+struct SendMDANotificationParams;
+
+// class Debugger: This class implements DebugInterface to provide
+// the hooks to the Runtime directly.
+//
+
+class Debugger : public DebugInterface
+{
+ VPTR_VTABLE_CLASS(Debugger, DebugInterface);
+public:
+
+#ifndef DACCESS_COMPILE
+ Debugger();
+ ~Debugger();
+#endif
+
+ // If 0, then not yet initialized. If non-zero, then LS is initialized.
+ LONG m_fLeftSideInitialized;
+
+ // This flag controls the window where SetDesiredNGENCompilerFlags is allowed,
+ // which is until Debugger::StartupPhase2 is complete. Typically it would be
+ // set during the CreateProcess debug event but it could be set other times such
+ // as module load for clr.dll.
+ SVAL_DECL(BOOL, s_fCanChangeNgenFlags);
+
+ friend class DebuggerLazyInit;
+#ifdef TEST_DATA_CONSISTENCY
+ friend class DataTest;
+#endif
+
+ // Checks if the corresponding table (method info / module / pending func-eval) has been allocated, and if not does so.
+ HRESULT inline CheckInitMethodInfoTable();
+ HRESULT inline CheckInitModuleTable();
+ HRESULT CheckInitPendingFuncEvalTable();
+
+#ifndef DACCESS_COMPILE
+ DWORD GetRCThreadId()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (m_pRCThread)
+ return m_pRCThread->GetRCThreadId();
+ else
+ return 0;
+ }
+#endif
+
+ //
+ // Methods exported from the Runtime Controller to the Runtime.
+ // (These are the methods specified by DebugInterface.)
+ //
+ HRESULT Startup(void);
+
+ HRESULT StartupPhase2(Thread * pThread);
+
+ void InitializeLazyDataIfNecessary();
+
+ void LazyInit(); // will throw
+ HRESULT LazyInitWrapper(); // calls LazyInit and converts to HR.
+
+ // Helper on startup to notify debugger
+ void RaiseStartupNotification();
+
+ // Send a raw managed debug event over the managed pipeline.
+ void SendRawEvent(const DebuggerIPCEvent * pManagedEvent);
+
+ // Message box API for the left side of the debugger. This API handles calls from the
+ // debugger helper thread as well as from normal EE threads. It is the only one that
+ // should be used from inside the debugger left side.
+ int MessageBox(
+ UINT uText, // Resource Identifier for Text message
+ UINT uCaption, // Resource Identifier for Caption
+ UINT uType, // Style of MessageBox
+ BOOL displayForNonInteractive, // Display even if the process is running non interactive
+ BOOL showFileNameInTitle, // Flag to show FileName in Caption
+ ...); // Additional Arguments
+
+ void SetEEInterface(EEDebugInterface* i);
+ void StopDebugger(void);
+ BOOL IsStopped(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ // implements DebugInterface but also is called internally
+ return m_stopped;
+ }
+
+
+
+ void ThreadCreated(Thread* pRuntimeThread);
+ void ThreadStarted(Thread* pRuntimeThread);
+ void DetachThread(Thread *pRuntimeThread);
+
+ BOOL SuspendComplete();
+
+ void LoadModule(Module* pRuntimeModule,
+ LPCWSTR pszModuleName,
+ DWORD dwModuleName,
+ Assembly *pAssembly,
+ AppDomain *pAppDomain,
+ DomainFile * pDomainFile,
+ BOOL fAttaching);
+ void LoadModuleFinished(Module* pRuntimeModule, AppDomain * pAppDomain);
+ DebuggerModule * AddDebuggerModule(DomainFile * pDomainFile);
+
+
+ void UnloadModule(Module* pRuntimeModule,
+ AppDomain *pAppDomain);
+ void DestructModule(Module *pModule);
+
+ void RemoveModuleReferences(Module * pModule);
+
+
+ void SendUpdateModuleSymsEventAndBlock(Module * pRuntimeModule, AppDomain * pAppDomain);
+ void SendRawUpdateModuleSymsEvent(Module * pRuntimeModule, AppDomain * pAppDomain);
+
+ BOOL LoadClass(TypeHandle th,
+ mdTypeDef classMetadataToken,
+ Module* classModule,
+ AppDomain *pAppDomain);
+ void UnloadClass(mdTypeDef classMetadataToken,
+ Module* classModule,
+ AppDomain *pAppDomain);
+
+ void SendClassLoadUnloadEvent (mdTypeDef classMetadataToken,
+ DebuggerModule *classModule,
+ Assembly *pAssembly,
+ AppDomain *pAppDomain,
+ BOOL fIsLoadEvent);
+ BOOL SendSystemClassLoadUnloadEvent (mdTypeDef classMetadataToken,
+ Module *classModule,
+ BOOL fIsLoadEvent);
+
+ void SendCatchHandlerFound(Thread *pThread,
+ FramePointer fp,
+ SIZE_T nOffset,
+ DWORD dwFlags);
+
+ LONG NotifyOfCHFFilter(EXCEPTION_POINTERS* pExceptionPointers, PVOID pCatchStackAddr);
+
+
+ bool FirstChanceNativeException(EXCEPTION_RECORD *exception,
+ T_CONTEXT *context,
+ DWORD code,
+ Thread *thread);
+
+ bool IsJMCMethod(Module* pModule, mdMethodDef tkMethod);
+
+ int GetMethodEncNumber(MethodDesc * pMethod);
+
+
+ bool FirstChanceManagedException(Thread *pThread, SIZE_T currentIP, SIZE_T currentSP);
+
+ void FirstChanceManagedExceptionCatcherFound(Thread *pThread,
+ MethodDesc *pMD, TADDR pMethodAddr,
+ BYTE *currentSP,
+ EE_ILEXCEPTION_CLAUSE *pEHClause);
+
+ LONG LastChanceManagedException(EXCEPTION_POINTERS * pExceptionInfo,
+ Thread *pThread,
+ BOOL jitAttachRequested);
+
+ void ManagedExceptionUnwindBegin(Thread *pThread);
+
+ void DeleteInterceptContext(void *pContext);
+
+ void ExceptionFilter(MethodDesc *fd, TADDR pMethodAddr, SIZE_T offset, BYTE *pStack);
+ void ExceptionHandle(MethodDesc *fd, TADDR pMethodAddr, SIZE_T offset, BYTE *pStack);
+
+ int NotifyUserOfFault(bool userBreakpoint, DebuggerLaunchSetting dls);
+
+ SIZE_T GetArgCount(MethodDesc* md, BOOL *fVarArg = NULL);
+
+ void FuncEvalComplete(Thread *pThread, DebuggerEval *pDE);
+
+ DebuggerMethodInfo *CreateMethodInfo(Module *module, mdMethodDef md);
+ void JITComplete(MethodDesc* fd, TADDR newAddress);
+
+ HRESULT RequestFavor(FAVORCALLBACK fp, void * pData);
+
+#ifdef EnC_SUPPORTED
+ HRESULT UpdateFunction(MethodDesc* pFD, SIZE_T encVersion);
+ HRESULT AddFunction(MethodDesc* md, SIZE_T enCVersion);
+ HRESULT UpdateNotYetLoadedFunction(mdMethodDef token, Module * pModule, SIZE_T enCVersion);
+
+ HRESULT AddField(FieldDesc* fd, SIZE_T enCVersion);
+ HRESULT RemapComplete(MethodDesc *pMd, TADDR addr, SIZE_T nativeOffset);
+
+ HRESULT MapILInfoToCurrentNative(MethodDesc *pMD,
+ SIZE_T ilOffset,
+ TADDR nativeFnxStart,
+ SIZE_T *nativeOffset);
+#endif // EnC_SUPPORTED
+
+ void GetVarInfo(MethodDesc * fd, // [IN] method of interest
+ void *DebuggerVersionToken, // [IN] which edit version
+ SIZE_T * cVars, // [OUT] size of 'vars'
+ const ICorDebugInfo::NativeVarInfo **vars // [OUT] map telling where local vars are stored
+ );
+
+ void getBoundariesHelper(MethodDesc * ftn,
+ unsigned int *cILOffsets, DWORD **pILOffsets);
+ void getBoundaries(MethodDesc * ftn,
+ unsigned int *cILOffsets, DWORD **pILOffsets,
+ ICorDebugInfo::BoundaryTypes* implictBoundaries);
+
+ void getVars(MethodDesc * ftn,
+ ULONG32 *cVars, ICorDebugInfo::ILVarInfo **vars,
+ bool *extendOthers);
+
+ DebuggerMethodInfo *GetOrCreateMethodInfo(Module *pModule, mdMethodDef token);
+
+ PTR_DebuggerMethodInfoTable GetMethodInfoTable() { return m_pMethodInfos; }
+
+ // Gets the DJI for 'fd'
+ // If 'pbAddr' is non-NULL and if the method has multiple copies of code
+ // (because of EnC or code-pitching), this returns the DJI corresponding
+ // to 'pbAddr'
+ DebuggerJitInfo *GetJitInfo(MethodDesc *fd, const BYTE *pbAddr, DebuggerMethodInfo **pMethInfo = NULL);
+
+ // Several ways of getting a DJI. DJIs are 1:1 w/ Native Code blobs.
+ // Caller must guarantee good parameters.
+ // DJIs can be lazily created; so the only way these will fail is in an OOM case.
+ DebuggerJitInfo *GetJitInfoFromAddr(TADDR addr);
+
+ // EnC trashes the methoddesc to point to the latest version. Thus given a method-desc,
+ // we can get the most recent DJI.
+ DebuggerJitInfo *GetLatestJitInfoFromMethodDesc(MethodDesc * pMethodDesc);
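+
+ // Illustrative sketch (not part of this header); pMD and addr are hypothetical. The
+ // address form picks the EnC copy containing addr, while the MethodDesc-only form
+ // returns the most recent version:
+ //     DebuggerMethodInfo * pDMI = NULL;
+ //     DebuggerJitInfo * pDJI = g_pDebugger->GetJitInfo(pMD, addr, &pDMI);
+ //     DebuggerJitInfo * pLatest = g_pDebugger->GetLatestJitInfoFromMethodDesc(pMD);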
+
+
+ HRESULT GetILToNativeMapping(MethodDesc *pMD, ULONG32 cMap, ULONG32 *pcMap,
+ COR_DEBUG_IL_TO_NATIVE_MAP map[]);
+
+ HRESULT GetILToNativeMappingIntoArrays(
+ MethodDesc * pMD,
+ USHORT cMapMax,
+ USHORT * pcMap,
+ UINT ** prguiILOffset,
+ UINT ** prguiNativeOffset);
+
+ PRD_TYPE GetPatchedOpcode(CORDB_ADDRESS_TYPE *ip);
+ BOOL CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address, /*OUT*/ PRD_TYPE *pOpcode);
+
+ void TraceCall(const BYTE *address);
+
+ bool ThreadsAtUnsafePlaces(void);
+
+
+ void PollWaitingForHelper();
+
+ void IncThreadsAtUnsafePlaces(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedIncrement(&m_threadsAtUnsafePlaces);
+ }
+
+ void DecThreadsAtUnsafePlaces(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedDecrement(&m_threadsAtUnsafePlaces);
+ }
+
+ static StackWalkAction AtSafePlaceStackWalkCallback(CrawlFrame *pCF,
+ VOID* data);
+ bool IsThreadAtSafePlaceWorker(Thread *thread);
+ bool IsThreadAtSafePlace(Thread *thread);
+
+ CorDebugUserState GetFullUserState(Thread *pThread);
+
+
+ void Terminate();
+ void Continue();
+
+#ifdef FEATURE_LEGACYNETCF_DBG_HOST_CONTROL
+ VOID InvokeLegacyNetCFHostPauseCallback();
+ VOID InvokeLegacyNetCFHostResumeCallback();
+#endif
+
+ bool HandleIPCEvent(DebuggerIPCEvent* event);
+
+ DebuggerModule * LookupOrCreateModule(VMPTR_DomainFile vmDomainFile);
+ DebuggerModule * LookupOrCreateModule(DomainFile * pDomainFile);
+ DebuggerModule * LookupOrCreateModule(Module * pModule, AppDomain * pAppDomain);
+
+ HRESULT GetAndSendInterceptCommand(DebuggerIPCEvent *event);
+
+ //HRESULT GetAndSendJITFunctionData(DebuggerRCThread* rcThread,
+ // mdMethodDef methodToken,
+ // void* functionModuleToken);
+ HRESULT GetFuncData(mdMethodDef funcMetadataToken,
+ DebuggerModule* pDebuggerModule,
+ SIZE_T nVersion,
+ DebuggerIPCE_FuncData *data);
+
+
+ // The following four functions convert between type handles and the data that is
+ // shipped for types to and from the right-side.
+ //
+ // I'm heading toward getting rid of the first two - they are almost never used.
+ static HRESULT ExpandedTypeInfoToTypeHandle(DebuggerIPCE_ExpandedTypeData *data,
+ unsigned int genericArgsCount,
+ DebuggerIPCE_BasicTypeData *genericArgs,
+ TypeHandle *pRes);
+ static HRESULT BasicTypeInfoToTypeHandle(DebuggerIPCE_BasicTypeData *data,
+ TypeHandle *pRes);
+ void TypeHandleToBasicTypeInfo(AppDomain *pAppDomain,
+ TypeHandle th,
+ DebuggerIPCE_BasicTypeData *res);
+
+ // TypeHandleToExpandedTypeInfo returns different DebuggerIPCE_ExpandedTypeData objects
+ // depending on whether the object value that the TypeData corresponds to is
+ // boxed or not. Different parts of the API transfer objects in slightly different ways.
+ // AllBoxed:
+ //    For GetAndSendObjectData all values are boxed.
+ //
+ // OnlyPrimitivesUnboxed:
+ //    When returning results from FuncEval only "true" structs
+ //    get boxed, i.e. primitives are unboxed.
+ //
+ // NoValueTypeBoxing:
+ //    TypeHandleToExpandedTypeInfo is also used to report type parameters,
+ //    and in this case none of the types are considered boxed.
+ enum AreValueTypesBoxed { NoValueTypeBoxing, OnlyPrimitivesUnboxed, AllBoxed };
+
+ void TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed,
+ AppDomain *pAppDomain,
+ TypeHandle th,
+ DebuggerIPCE_ExpandedTypeData *res);
+
+ class TypeDataWalk
+ {
+ DebuggerIPCE_TypeArgData *m_curdata;
+ unsigned int m_remaining;
+
+ public:
+ TypeDataWalk(DebuggerIPCE_TypeArgData *pData, unsigned int nData)
+ {
+ m_curdata = pData;
+ m_remaining = nData;
+ }
+
+
+ // These are for type arguments in the funceval case.
+ // They throw COMPLUS exceptions if they fail, so can only be used during funceval.
+ void ReadTypeHandles(unsigned int nTypeArgs, TypeHandle *pRes);
+ TypeHandle ReadInstantiation(Module *pModule, mdTypeDef tok, unsigned int nTypeArgs);
+ TypeHandle ReadTypeHandle();
+
+ BOOL Finished() { LIMITED_METHOD_CONTRACT; return m_remaining == 0; }
+ DebuggerIPCE_TypeArgData *ReadOne() { LIMITED_METHOD_CONTRACT; if (m_remaining) { m_remaining--; return m_curdata++; } else return NULL; }
+
+ };
+
+
+
+ HRESULT GetMethodDescData(MethodDesc *pFD,
+ DebuggerJitInfo *pJITInfo,
+ DebuggerIPCE_JITFuncData *data);
+
+ void GetAndSendTransitionStubInfo(CORDB_ADDRESS_TYPE *stubAddress);
+
+ void SendBreakpoint(Thread *thread, T_CONTEXT *context,
+ DebuggerBreakpoint *breakpoint);
+
+ void SendStep(Thread *thread, T_CONTEXT *context,
+ DebuggerStepper *stepper,
+ CorDebugStepReason reason);
+
+ void LockAndSendEnCRemapEvent(DebuggerJitInfo * dji, SIZE_T currentIP, SIZE_T *resumeIP);
+ void LockAndSendEnCRemapCompleteEvent(MethodDesc *pFD);
+ void SendEnCUpdateEvent(DebuggerIPCEventType eventType,
+ Module * pModule,
+ mdToken memberToken,
+ mdTypeDef classToken,
+ SIZE_T enCVersion);
+ void LockAndSendBreakpointSetError(PATCH_UNORDERED_ARRAY * listUnbindablePatches);
+
+ // helper for SendException
+ void SendExceptionEventsWorker(
+ Thread * pThread,
+ bool firstChance,
+ bool fIsInterceptable,
+ bool continuable,
+ SIZE_T currentIP,
+ FramePointer framePointer,
+ bool atSafePlace);
+
+ // Main function to send an exception event, handle jit-attach if needed, etc
+ HRESULT SendException(Thread *pThread,
+ bool fFirstChance,
+ SIZE_T currentIP,
+ SIZE_T currentSP,
+ bool fContinuable,
+ bool fAttaching,
+ bool fForceNonInterceptable,
+ EXCEPTION_POINTERS * pExceptionInfo);
+
+ // Top-level function to handle sending a user-breakpoint, jit-attach, sync, etc.
+ void SendUserBreakpoint(Thread * thread);
+
+ // Send the user breakpoint and block waiting for a continue.
+ void SendUserBreakpointAndSynchronize(Thread * pThread);
+
+ // Just send the actual event.
+ void SendRawUserBreakpoint(Thread *thread);
+
+
+
+ void SendInterceptExceptionComplete(Thread *thread);
+
+ HRESULT AttachDebuggerForBreakpoint(Thread *thread,
+ __in_opt __in_z WCHAR *wszLaunchReason);
+
+
+ void ThreadIsSafe(Thread *thread);
+
+ void UnrecoverableError(HRESULT errorHR,
+ unsigned int errorCode,
+ const char *errorFile,
+ unsigned int errorLine,
+ bool exitThread);
+
+ BOOL IsSynchronizing(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_trappingRuntimeThreads;
+ }
+
+ //
+ // The debugger mutex is used to protect any "global" Left Side
+ // data structures. The RCThread takes it when handling a Right
+ // Side event, and Runtime threads take it when processing
+ // debugger events.
+ //
+#ifdef _DEBUG
+ int m_mutexCount;
+#endif
+
+ // Helper function
+ HRESULT AttachDebuggerForBreakpointOnHelperThread(Thread *pThread);
+
+ // helper function to send Exception IPC event and Exception_CallBack2 event
+ HRESULT SendExceptionHelperAndBlock(
+ Thread *pThread,
+ OBJECTHANDLE exceptionHandle,
+ bool continuable,
+ FramePointer framePointer,
+ SIZE_T nOffset,
+ CorDebugExceptionCallbackType eventType,
+ DWORD dwFlags);
+
+
+ // Helper function to send out LogMessage only. Can be either on helper thread or manager thread.
+ void SendRawLogMessage(
+ Thread *pThread,
+ AppDomain *pAppDomain,
+ int iLevel,
+ SString * pCategory,
+ SString * pMessage);
+
+
+ // Helper function to send MDA notification
+ void SendRawMDANotification(SendMDANotificationParams * params);
+ static void SendMDANotificationOnHelperThreadProxy(SendMDANotificationParams * params);
+
+ // Returns a bitfield reflecting the managed debugging state at the time of
+ // the jit attach.
+ CLR_DEBUGGING_PROCESS_FLAGS GetAttachStateFlags();
+
+ // Records that this thread is about to trigger jit attach and
+ // resolves the race for which thread gets to trigger it
+ BOOL PreJitAttach(BOOL willSendManagedEvent, BOOL willLaunchDebugger, BOOL explicitUserRequest);
+
+ // Blocks until the debugger completes jit attach
+ void WaitForDebuggerAttach();
+
+ // Cleans up after jit attach is complete
+ void PostJitAttach();
+
+ // Main worker function to initiate, handle, and wait for a Jit-attach.
+ void JitAttach(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, BOOL willSendManagedEvent, BOOL explicitUserRequest);
+
+private:
+ void DoNotCallDirectlyPrivateLock(void);
+ void DoNotCallDirectlyPrivateUnlock(void);
+
+ // This function gets the jit debugger launched and waits for the native attach to complete
+ // Make sure you called PreJitAttach and it returned TRUE before you call this
+ HRESULT LaunchJitDebuggerAndNativeAttach(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo);
+
+ // Helper to serialize metadata that has been updated by the profiler into
+ // a buffer so that it can be read out-of-proc
+ BYTE* SerializeModuleMetaData(Module * pModule, DWORD * countBytes);
+
+ /// Wraps fusion's FusionCopyPDBs for a Module.
+ HRESULT CopyModulePdb(Module* pRuntimeModule);
+
+ // When attaching to a process, this is called to enumerate all of the
+ // AppDomains currently in the process and allow module PDBs to be copied over to the shadow dir, maintaining our V2 in-proc behaviour.
+ HRESULT IterateAppDomainsForPdbs();
+
+#ifndef DACCESS_COMPILE
+public:
+ // Helper function to initialize JDI structure
+ void InitDebuggerLaunchJitInfo(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo);
+
+ // Helper function to retrieve JDI structure
+ JIT_DEBUG_INFO * GetDebuggerLaunchJitInfo(void);
+
+private:
+ static JIT_DEBUG_INFO s_DebuggerLaunchJitInfo;
+ static EXCEPTION_RECORD s_DebuggerLaunchJitInfoExceptionRecord;
+ static CONTEXT s_DebuggerLaunchJitInfoContext;
+
+ static void AcquireDebuggerLock(Debugger *c)
+ {
+ WRAPPER_NO_CONTRACT;
+ c->DoNotCallDirectlyPrivateLock();
+ }
+
+ static void ReleaseDebuggerLock(Debugger *c)
+ {
+ WRAPPER_NO_CONTRACT;
+ c->DoNotCallDirectlyPrivateUnlock();
+ }
+#else // DACCESS_COMPILE
+ static void AcquireDebuggerLock(Debugger *c);
+ static void ReleaseDebuggerLock(Debugger *c);
+#endif // DACCESS_COMPILE
+
+
+public:
+ // define type for DebuggerLockHolder
+ typedef DacHolder<Debugger *, Debugger::AcquireDebuggerLock, Debugger::ReleaseDebuggerLock> DebuggerLockHolder;
+
+ void LockForEventSending(DebuggerLockHolder *dbgLockHolder);
+ void UnlockFromEventSending(DebuggerLockHolder *dbgLockHolder);
+ void SyncAllThreads(DebuggerLockHolder *dbgLockHolder);
+ void SendSyncCompleteIPCEvent();
+
+ // Helper for sending a single pre-baked IPC event and blocking on the continue.
+ // See definition of SENDIPCEVENT_BEGIN for usage pattern.
+ void SendSimpleIPCEventAndBlock();
+
+ void SendCreateProcess(DebuggerLockHolder * pDbgLockHolder);
+
+ void IncrementClassLoadCallbackCount(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ InterlockedIncrement(&m_dClassLoadCallbackCount);
+ }
+
+ void DecrementClassLoadCallbackCount(void)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_dClassLoadCallbackCount > 0);
+ InterlockedDecrement(&m_dClassLoadCallbackCount);
+ }
+
+
+#ifdef _DEBUG_IMPL
+ bool ThreadHoldsLock(void)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (g_fProcessDetach)
+ return true;
+
+ BEGIN_GETTHREAD_ALLOWED;
+ if (g_pEEInterface->GetThread())
+ {
+ return (GetThreadIdHelper(g_pEEInterface->GetThread()) == m_mutexOwner);
+ }
+ else
+ {
+ return (GetCurrentThreadId() == m_mutexOwner);
+ }
+ END_GETTHREAD_ALLOWED;
+ }
+#endif // _DEBUG_IMPL
+
+#ifdef FEATURE_INTEROP_DEBUGGING
+ static VOID M2UHandoffHijackWorker(
+ T_CONTEXT *pContext,
+ EXCEPTION_RECORD *pExceptionRecord);
+
+ LONG FirstChanceSuspendHijackWorker(
+ T_CONTEXT *pContext,
+ EXCEPTION_RECORD *pExceptionRecord);
+ static void GenericHijackFunc(void);
+ static void SecondChanceHijackFunc(void);
+ static void SecondChanceHijackFuncWorker(void);
+ static void SignalHijackStarted(void);
+ static void ExceptionForRuntimeHandoffStart(void);
+ static void ExceptionForRuntimeHandoffComplete(void);
+ static void SignalHijackComplete(void);
+ static void ExceptionNotForRuntime(void);
+ static void NotifyRightSideOfSyncComplete(void);
+ static void NotifySecondChanceReadyForData(void);
+#endif // FEATURE_INTEROP_DEBUGGING
+
+ void UnhandledHijackWorker(T_CONTEXT * pContext, EXCEPTION_RECORD * pRecord);
+
+ //
+ // InsertToMethodInfoList puts the given DMI onto the DMI list.
+ //
+ HRESULT InsertToMethodInfoList(DebuggerMethodInfo *dmi);
+
+
+ // MapAndBindFunctionPatches will map any and all breakpoints (except EnC
+ // patches) from previous versions of the method into the current version.
+ HRESULT MapAndBindFunctionPatches( DebuggerJitInfo *pJiNew,
+ MethodDesc * fd,
+ CORDB_ADDRESS_TYPE * addrOfCode);
+
+ // MapPatchToDJI (MPTDJI) takes the given patch (and djiFrom, if you've got it), and
+ // does the IL mapping forwards to djiTo. Returns
+ // CORDBG_E_CODE_NOT_AVAILABLE if there isn't a mapping, which means that
+ // no patch was placed.
+ HRESULT MapPatchToDJI(DebuggerControllerPatch *dcp, DebuggerJitInfo *djiTo);
+
+ HRESULT LaunchDebuggerForUser(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo,
+ BOOL useManagedBPForManagedAttach, BOOL explicitUserRequest);
+
+ void SendLogMessage (int iLevel,
+ SString * pSwitchName,
+ SString * pMessage);
+
+ void SendLogSwitchSetting (int iLevel,
+ int iReason,
+ __in_z LPCWSTR pLogSwitchName,
+ __in_z LPCWSTR pParentSwitchName);
+
+ bool IsLoggingEnabled (void)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_LoggingEnabled)
+ return true;
+ return false;
+ }
+
+ // send a custom debugger notification to the RS
+ void SendCustomDebuggerNotification(Thread * pThread, DomainFile * pDomain, mdTypeDef classToken);
+
+ // Send an MDA notification. This ultimately translates to an ICorDebugMDA object on the Right-Side.
+ void SendMDANotification(
+ Thread * pThread, // may be NULL. Lets us send on behalf of other threads.
+ SString * szName,
+ SString * szDescription,
+ SString * szXML,
+ CorDebugMDAFlags flags,
+ BOOL bAttach
+ );
+
+
+ void EnableLogMessages (bool fOnOff) {LIMITED_METHOD_CONTRACT; m_LoggingEnabled = fOnOff;}
+ bool GetILOffsetFromNative (MethodDesc *PFD, const BYTE *pbAddr,
+ DWORD nativeOffset, DWORD *ilOffset);
+
+ DWORD GetHelperThreadID(void );
+
+
+ HRESULT SetIP( bool fCanSetIPOnly,
+ Thread *thread,
+ Module *module,
+ mdMethodDef mdMeth,
+ DebuggerJitInfo* dji,
+ SIZE_T offsetILTo,
+ BOOL fIsIL);
+
+ // Helper routines used by Debugger::SetIP
+
+ // If we have a varargs function, we can't set the IP (we don't know how to pack/unpack the arguments), so if we
+ // call SetIP with fCanSetIPOnly = true, we need to check for that.
+ BOOL IsVarArgsFunction(unsigned int nEntries, PTR_NativeVarInfo varNativeInfo);
+
+ HRESULT ShuffleVariablesGet(DebuggerJitInfo *dji,
+ SIZE_T offsetFrom,
+ T_CONTEXT *pCtx,
+ SIZE_T **prgVal1,
+ SIZE_T **prgVal2,
+ BYTE ***prgpVCs);
+
+ HRESULT ShuffleVariablesSet(DebuggerJitInfo *dji,
+ SIZE_T offsetTo,
+ T_CONTEXT *pCtx,
+ SIZE_T **prgVal1,
+ SIZE_T **prgVal2,
+ BYTE **rgpVCs);
+
+ HRESULT GetVariablesFromOffset(MethodDesc *pMD,
+ UINT varNativeInfoCount,
+ ICorDebugInfo::NativeVarInfo *varNativeInfo,
+ SIZE_T offsetFrom,
+ T_CONTEXT *pCtx,
+ SIZE_T *rgVal1,
+ SIZE_T *rgVal2,
+ UINT uRgValSize, // number of element of the preallocated rgVal1 and rgVal2
+ BYTE ***rgpVCs);
+
+ HRESULT SetVariablesAtOffset(MethodDesc *pMD,
+ UINT varNativeInfoCount,
+ ICorDebugInfo::NativeVarInfo *varNativeInfo,
+ SIZE_T offsetTo,
+ T_CONTEXT *pCtx,
+ SIZE_T *rgVal1,
+ SIZE_T *rgVal2,
+ BYTE **rgpVCs);
+
+ BOOL IsThreadContextInvalid(Thread *pThread);
+
+ // notification for SQL fiber debugging support
+ void CreateConnection(CONNID dwConnectionId, __in_z WCHAR *wzName);
+ void DestroyConnection(CONNID dwConnectionId);
+ void ChangeConnection(CONNID dwConnectionId);
+
+ //
+ // This function is used to identify the helper thread.
+ //
+ bool ThisIsHelperThread(void);
+
+ HRESULT ReDaclEvents(PSECURITY_DESCRIPTOR securityDescriptor);
+
+#ifdef DACCESS_COMPILE
+ virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ virtual void EnumMemoryRegionsIfFuncEvalFrame(CLRDataEnumMemoryFlags flags, Frame * pFrame);
+#endif
+
+ BOOL ShouldAutoAttach();
+ BOOL FallbackJITAttachPrompt();
+ HRESULT SetFiberMode(bool isFiberMode);
+
+ HRESULT AddAppDomainToIPC (AppDomain *pAppDomain);
+ HRESULT RemoveAppDomainFromIPC (AppDomain *pAppDomain);
+ HRESULT UpdateAppDomainEntryInIPC (AppDomain *pAppDomain);
+
+ void SendCreateAppDomainEvent(AppDomain * pAppDomain);
+ void SendExitAppDomainEvent (AppDomain *pAppDomain);
+
+ // Notify the debugger that an assembly has been loaded
+ void LoadAssembly(DomainAssembly * pDomainAssembly);
+
+ // Notify the debugger that an assembly has been unloaded
+ void UnloadAssembly(DomainAssembly * pDomainAssembly);
+
+ HRESULT FuncEvalSetup(DebuggerIPCE_FuncEvalInfo *pEvalInfo, BYTE **argDataArea, DebuggerEval **debuggerEvalKey);
+ HRESULT FuncEvalSetupReAbort(Thread *pThread, Thread::ThreadAbortRequester requester);
+ HRESULT FuncEvalAbort(DebuggerEval *debuggerEvalKey);
+ HRESULT FuncEvalRudeAbort(DebuggerEval *debuggerEvalKey);
+ HRESULT FuncEvalCleanup(DebuggerEval *debuggerEvalKey);
+
+ HRESULT SetReference(void *objectRefAddress, VMPTR_OBJECTHANDLE vmObjectHandle, void *newReference);
+ HRESULT SetValueClass(void *oldData, void *newData, DebuggerIPCE_BasicTypeData *type);
+
+ HRESULT SetILInstrumentedCodeMap(MethodDesc *fd,
+ BOOL fStartJit,
+ ULONG32 cILMapEntries,
+ COR_IL_MAP rgILMapEntries[]);
+
+ void EarlyHelperThreadDeath(void);
+
+ void ShutdownBegun(void);
+
+ void LockDebuggerForShutdown(void);
+
+ void DisableDebugger(void);
+
+ // Pid of the left side process that this Debugger instance is in.
+ DWORD GetPid(void) { return m_processId; }
+
+ HRESULT NameChangeEvent(AppDomain *pAppDomain, Thread *pThread);
+
+ // send an event to the RS indicating that there's a Ctrl-C or Ctrl-Break
+ BOOL SendCtrlCToDebugger(DWORD dwCtrlType);
+
+ // Allows the debugger to keep an up-to-date list of special threads
+ HRESULT UpdateSpecialThreadList(DWORD cThreadArrayLength, DWORD *rgdwThreadIDArray);
+
+ // Updates the pointer for the debugger services
+ void SetIDbgThreadControl(IDebuggerThreadControl *pIDbgThreadControl);
+
+#ifndef DACCESS_COMPILE
+ static void AcquireDebuggerDataLock(Debugger *pDebugger);
+
+ static void ReleaseDebuggerDataLock(Debugger *pDebugger);
+
+#else // DACCESS_COMPILE
+ // determine whether the LS holds the data lock. If it does, we will assume the locked data is in an
+ // inconsistent state and will throw an exception. The DAC will execute this if we are executing code
+ // that takes the lock.
+ static void AcquireDebuggerDataLock(Debugger *pDebugger);
+
+ // unimplemented--nothing to do here
+ static void ReleaseDebuggerDataLock(Debugger *pDebugger);
+
+#endif // DACCESS_COMPILE
+
+ // define type for DebuggerDataLockHolder
+ typedef DacHolder<Debugger *, Debugger::AcquireDebuggerDataLock, Debugger::ReleaseDebuggerDataLock> DebuggerDataLockHolder;
+
+#ifdef _DEBUG
+ // Use for asserts
+ bool HasDebuggerDataLock()
+ {
+ // If no lazy data yet, then can't possibly have the debugger-data lock.
+ if (!g_pDebugger->HasLazyData())
+ {
+ return false;
+ }
+ return (g_pDebugger->GetDebuggerDataLock()->OwnedByCurrentThread()) != 0;
+ }
+#endif
+
+
+ // For Just-My-Code (aka Just-User-Code).
+ // The jit injects probes in debuggable managed methods that look like:
+ // if (*pFlag != 0) call JIT_DbgIsJustMyCode.
+ // pFlag is a unique per-method constant determined by GetJMCFlagAddr.
+ // JIT_DbgIsJustMyCode will get the ip & fp and call OnMethodEnter.
+
+ // pIP is an ip within the method, right after the prolog.
+#ifndef DACCESS_COMPILE
+ virtual void OnMethodEnter(void * pIP);
+ virtual DWORD* GetJMCFlagAddr(Module * pModule);
+#endif
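+
+ // Roughly, the injected probe behaves like the following sketch (pseudo-code, not
+ // the literal JIT expansion; pFlag is the flag described above):
+ //     if (*pFlag != 0)
+ //     {
+ //         JIT_DbgIsJustMyCode();   // grabs ip & fp, then calls OnMethodEnter
+ //     }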
+
+ // GetJMCFlagAddr provides a unique flag for each module. UpdateAllModuleJMCFlag
+ // will go through all modules with user code and set their flag to fStatus;
+ // UpdateModuleJMCFlag sets the flag for a single module.
+ void UpdateAllModuleJMCFlag(bool fStatus);
+ void UpdateModuleJMCFlag(Module * pRuntime, bool fStatus);
+
+ // Set the default JMC status of the specified module. This function
+ // also finds all the DMIs in the specified module and update their
+ // JMC status as well.
+ void SetModuleDefaultJMCStatus(Module * pRuntimeModule, bool fStatus);
+
+#ifndef DACCESS_COMPILE
+ static DWORD GetThreadIdHelper(Thread *pThread);
+#endif // DACCESS_COMPILE
+
+private:
+ DebuggerJitInfo *GetJitInfoWorker(MethodDesc *fd, const BYTE *pbAddr, DebuggerMethodInfo **pMethInfo);
+
+ // Save the necessary information for the debugger to recognize an IP in one of the thread redirection
+ // functions.
+ void InitializeHijackFunctionAddress();
+
+ void InitDebugEventCounting();
+ void DoHelperThreadDuty();
+
+ typedef enum
+ {
+ ATTACH_YES,
+ ATTACH_NO,
+ ATTACH_TERMINATE
+ } ATTACH_ACTION;
+
+ // Returns true if the debugger is not attached and DbgJITDebugLaunchSetting
+ // is set to either ATTACH_DEBUGGER or ASK_USER and the user requests attaching.
+ ATTACH_ACTION ShouldAttachDebugger(bool fIsUserBreakpoint);
+ ATTACH_ACTION ShouldAttachDebuggerProxy(bool fIsUserBreakpoint);
+ friend void ShouldAttachDebuggerStub(ShouldAttachDebuggerParams * p);
+ friend struct ShouldAttachDebuggerParams;
+
+ void TrapAllRuntimeThreads();
+ void ReleaseAllRuntimeThreads(AppDomain *pAppDomain);
+
+#ifndef DACCESS_COMPILE
+ // @dbgtodo inspection - eventually, all replies should be removed because requests will be DAC-ized.
+ // Do not call this function unless you are getting ThreadId from RS
+ void InitIPCReply(DebuggerIPCEvent *ipce,
+ DebuggerIPCEventType type)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(ipce != NULL);
+ ipce->type = type;
+ ipce->hr = S_OK;
+
+ ipce->processId = m_processId;
+ // AppDomain, Thread, are already initialized
+ }
+
+ void InitIPCEvent(DebuggerIPCEvent *ipce,
+ DebuggerIPCEventType type,
+ Thread *pThread,
+ AppDomain* pAppDomain)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ InitIPCEvent(ipce, type, pThread, VMPTR_AppDomain::MakePtr(pAppDomain));
+ }
+
+ // Let this function figure out the unique Id that we will use for the Thread.
+ void InitIPCEvent(DebuggerIPCEvent *ipce,
+ DebuggerIPCEventType type,
+ Thread *pThread,
+ VMPTR_AppDomain vmAppDomain)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ipce != NULL);
+ ipce->type = type;
+ ipce->hr = S_OK;
+ ipce->processId = m_processId;
+ ipce->vmAppDomain = vmAppDomain;
+ ipce->vmThread.SetRawPtr(pThread);
+ }
+
+ void InitIPCEvent(DebuggerIPCEvent *ipce,
+ DebuggerIPCEventType type)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE((type == DB_IPCE_SYNC_COMPLETE) ||
+ (type == DB_IPCE_TEST_CRST) ||
+ (type == DB_IPCE_TEST_RWLOCK));
+
+ Thread *pThread = g_pEEInterface->GetThread();
+ AppDomain *pAppDomain = NULL;
+
+ if (pThread)
+ {
+ pAppDomain = pThread->GetDomain();
+ }
+
+ InitIPCEvent(ipce,
+ type,
+ pThread,
+ VMPTR_AppDomain::MakePtr(pAppDomain));
+ }
+#endif // DACCESS_COMPILE
+
+ HRESULT GetFunctionInfo(Module *pModule,
+ mdToken functionToken,
+ BYTE **pCodeStart,
+ unsigned int *pCodeSize,
+ mdToken *pLocalSigToken);
+
+ // Allocate a buffer and send it to the right side
+ HRESULT GetAndSendBuffer(DebuggerRCThread* rcThread, ULONG bufSize);
+
+ // Allocate a buffer in the left-side for use by the right-side
+ HRESULT AllocateRemoteBuffer( ULONG bufSize, void **ppBuffer );
+
+ // Releases a previously requested remote buffer and sends the reply
+ HRESULT SendReleaseBuffer(DebuggerRCThread* rcThread, void *pBuffer);
+
+public:
+ // Release a previously requested remote buffer
+ HRESULT ReleaseRemoteBuffer(void *pBuffer, bool removeFromBlobList);
+
+private:
+#ifdef EnC_SUPPORTED
+ // Apply an EnC edit and send the result event to the RS
+ HRESULT ApplyChangesAndSendResult(DebuggerModule * pDebuggerModule,
+ DWORD cbMetadata,
+ BYTE *pMetadata,
+ DWORD cbIL,
+ BYTE *pIL);
+#endif // EnC_SUPPORTED
+
+ bool GetCompleteDebuggerLaunchString(SString * pStrArgsBuf);
+
+ // Launch a debugger for jit-attach
+ void EnsureDebuggerAttached(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, BOOL willSendManagedEvent, BOOL explicitUserRequest);
+ HRESULT EDAHelper(PROCESS_INFORMATION * pProcessInfo);
+ HRESULT EDAHelperProxy(PROCESS_INFORMATION * pProcessInfo);
+ friend void EDAHelperStub(EnsureDebuggerAttachedParams * p);
+ DebuggerLaunchSetting GetDbgJITDebugLaunchSetting();
+
+public:
+ HRESULT InitAppDomainIPC(void);
+ HRESULT TerminateAppDomainIPC(void);
+
+ bool ResumeThreads(AppDomain* pAppDomain);
+
+ static DWORD WaitForSingleObjectHelper(HANDLE handle, DWORD dwMilliseconds);
+
+ void ProcessAnyPendingEvals(Thread *pThread);
+
+ bool HasLazyData();
+ RCThreadLazyInit * GetRCThreadLazyData();
+
+ // The module table is lazy init, and may be NULL. Callers must check.
+ DebuggerModuleTable * GetModuleTable();
+
+ DebuggerHeap *GetInteropSafeHeap();
+ DebuggerHeap *GetInteropSafeHeap_NoThrow();
+ DebuggerHeap *GetInteropSafeExecutableHeap();
+ DebuggerHeap *GetInteropSafeExecutableHeap_NoThrow();
+ DebuggerLazyInit *GetLazyData();
+ HelperCanary * GetCanary();
+ void MarkDebuggerAttachedInternal();
+ void MarkDebuggerUnattachedInternal();
+
+ HANDLE GetAttachEvent() { return GetLazyData()->m_exAttachEvent; }
+
+private:
+#ifndef DACCESS_COMPILE
+ void StartCanaryThread();
+#endif
+ DebuggerPendingFuncEvalTable *GetPendingEvals() { return GetLazyData()->m_pPendingEvals; }
+ SIZE_T_UNORDERED_ARRAY * GetBPMappingDuplicates() { return &GetLazyData()->m_BPMappingDuplicates; }
+ HANDLE GetUnmanagedAttachEvent() { return GetLazyData()->m_exUnmanagedAttachEvent; }
+ BOOL GetDebuggerHandlingCtrlC() { return GetLazyData()->m_DebuggerHandlingCtrlC; }
+ void SetDebuggerHandlingCtrlC(BOOL f) { GetLazyData()->m_DebuggerHandlingCtrlC = f; }
+ HANDLE GetCtrlCMutex() { return GetLazyData()->m_CtrlCMutex; }
+ UnorderedPtrArray* GetMemBlobs() { return &GetLazyData()->m_pMemBlobs; }
+
+
+ PTR_DebuggerRCThread m_pRCThread;
+ DWORD m_processId; // our pid
+ BOOL m_trappingRuntimeThreads;
+ BOOL m_stopped;
+ BOOL m_unrecoverableError;
+ BOOL m_ignoreThreadDetach;
+ PTR_DebuggerMethodInfoTable m_pMethodInfos;
+
+
+ // This is the main debugger lock. It is a large lock and used to synchronize complex operations
+ // such as sending IPC events, debugger synchronization, and attach / detach.
+ // The debugger effectively can't make any radical state changes without holding this lock.
+ //
+ //
+ Crst m_mutex; // The main debugger lock.
+
+ // Flag to track if the debugger Crst needs to go into "Shutdown for Finalizer" mode.
+ // This means that only special shutdown threads (helper / finalizer / shutdown) can
+ // take the lock, and all others will just block forever if they take it.
+ bool m_fShutdownMode;
+
+ //
+ // Flag to track if the VM has told the debugger that it should block all threads
+ // as soon as possible as they go through the debugger. As of this writing, this is
+ // done via the debugger Crst: anyone attempting to take the lock will block forever.
+ //
+ bool m_fDisabled;
+
+#ifdef _DEBUG
+ // Ownership tracking for debugging.
+ DWORD m_mutexOwner;
+
+ // Tid that last called LockForEventSending.
+ DWORD m_tidLockedForEventSending;
+#endif
+ LONG m_threadsAtUnsafePlaces;
+ Volatile<BOOL> m_jitAttachInProgress;
+
+ // True if after the jit attach we plan to send a managed non-catchup
+ // debug event
+ BOOL m_attachingForManagedEvent;
+ BOOL m_launchingDebugger;
+ BOOL m_userRequestedDebuggerLaunch;
+
+ BOOL m_LoggingEnabled;
+ AppDomainEnumerationIPCBlock *m_pAppDomainCB;
+
+ LONG m_dClassLoadCallbackCount;
+
+ // Lazily initialized array of debugger modules
+ // @dbgtodo module - eventually, DebuggerModule should go away,
+ // and all such information should be stored in either the VM's module class or in the RS.
+ DebuggerModuleTable *m_pModules;
+
+ // DacDbiInterfaceImpl needs to be able to write to private fields in the debugger class.
+ friend class DacDbiInterfaceImpl;
+
+ // Set OOP by RS to request a sync after a debug event.
+ // Clear by LS when we sync.
+ Volatile<BOOL> m_RSRequestedSync;
+
+ // send first chance/handler found callbacks for exceptions outside of JMC to the LS
+ Volatile<BOOL> m_sendExceptionsOutsideOfJMC;
+
+ // represents the different thread redirection functions recognized by the debugger
+ enum HijackFunction
+ {
+ kUnhandledException = 0,
+ kRedirectedForGCThreadControl,
+ kRedirectedForDbgThreadControl,
+ kRedirectedForUserSuspend,
+ kRedirectedForYieldTask,
+ kMaxHijackFunctions,
+ };
+
+ // static array storing the range of the thread redirection functions
+ static MemoryRange s_hijackFunction[kMaxHijackFunctions];
+
+ // Currently DAC doesn't support static array members. This field is used to work around this limitation.
+ ARRAY_PTR_MemoryRange m_rgHijackFunction;
+
+public:
+
+
+ IDebuggerThreadControl *m_pIDbgThreadControl;
+
+
+ // Sometimes we force all exceptions to be non-interceptable.
+ // There are currently three cases where we set this field to true:
+ //
+ // 1) NotifyOfCHFFilter()
+ // - If the CHF filter is the first handler we encounter in the first pass, then there is no
+ // managed stack frame at which we can intercept the exception anyway.
+ //
+ // 2) LastChanceManagedException()
+ // - If Watson is launched for an unhandled exception, then the exception cannot be intercepted.
+ //
+ // 3) SecondChanceHijackFuncWorker()
+ // - The RS hijacks the thread to this function to prevent the OS from killing the process at
+ // the end of the first pass. (When a debugger is attached, the OS does not run a second pass.)
+ // This function ensures that the debugger gets a second chance notification.
+ BOOL m_forceNonInterceptable;
+
+ // When we are doing an early attach, the RS shim should not queue all the fake attach events for
+ // the process, the appdomain, and the thread. Otherwise we'll get duplicate events when these
+ // entities are actually created. This flag is used to mark whether we are doing an early attach.
+ // There are still time windows where we can get duplicate events, but this flag closes down the
+ // most common scenario.
+ SVAL_DECL(BOOL, s_fEarlyAttach);
+
+private:
+ Crst * GetDebuggerDataLock() { SUPPORTS_DAC; return &GetLazyData()-> m_DebuggerDataLock; }
+
+ // This is lazily initialized. It's just a wrapper around a handle so we embed it here.
+ DebuggerHeap m_heap;
+ DebuggerHeap m_executableHeap;
+
+ PTR_DebuggerLazyInit m_pLazyData;
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ InProcDac m_inProcDac;
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+
+
+ // A list of all defines that affect layout of MD types
+ typedef enum _Target_Defines
+ {
+ DEFINE__DEBUG = 1,
+ } _Target_Defines;
+
+ // A bitfield that has bits set at build time corresponding
+ // to which defines are active
+ static const int _defines = 0
+#ifdef _DEBUG
+ | DEFINE__DEBUG
+#endif
+ ;
+
+public:
+ DWORD m_defines;
+ DWORD m_mdDataStructureVersion;
+};
+
+
+
+extern "C" {
+void STDCALL FuncEvalHijack(void);
+void * STDCALL FuncEvalHijackWorker(DebuggerEval *pDE);
+
+void STDCALL ExceptionHijack(void);
+void STDCALL ExceptionHijackEnd(void);
+void STDCALL ExceptionHijackWorker(T_CONTEXT * pContext, EXCEPTION_RECORD * pRecord, EHijackReason::EHijackReason reason, void * pData);
+
+void RedirectedHandledJITCaseForGCThreadControl_Stub();
+void RedirectedHandledJITCaseForGCThreadControl_StubEnd();
+
+void RedirectedHandledJITCaseForDbgThreadControl_Stub();
+void RedirectedHandledJITCaseForDbgThreadControl_StubEnd();
+
+void RedirectedHandledJITCaseForUserSuspend_Stub();
+void RedirectedHandledJITCaseForUserSuspend_StubEnd();
+
+void RedirectedHandledJITCaseForYieldTask_Stub();
+void RedirectedHandledJITCaseForYieldTask_StubEnd();
+};
+
+
+// CNewZeroData is the allocator used by all the hash tables that the helper thread could possibly alter. It uses
+// the interop safe allocator.
+class CNewZeroData
+{
+public:
+#ifndef DACCESS_COMPILE
+ static BYTE *Alloc(int iSize, int iMaxSize)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(g_pDebugger != NULL);
+ }
+ CONTRACTL_END;
+
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow();
+ if (pHeap == NULL)
+ {
+ return NULL;
+ }
+
+ BYTE *pb = (BYTE *) pHeap->Alloc(iSize);
+ if (pb == NULL)
+ {
+ return NULL;
+ }
+
+ memset(pb, 0, iSize);
+ return pb;
+ }
+ static void Free(BYTE *pPtr, int iSize)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(g_pDebugger != NULL);
+ }
+ CONTRACTL_END;
+
+
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow();
+ _ASSERTE(pHeap != NULL); // should already exist
+
+ pHeap->Free(pPtr);
+ }
+ static BYTE *Grow(BYTE *&pPtr, int iCurSize)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(g_pDebugger != NULL);
+ }
+ CONTRACTL_END;
+
+ void *p;
+
+ DebuggerHeap* pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow();
+ _ASSERTE(pHeap != NULL); // should already exist
+
+ PREFIX_ASSUME( iCurSize >= 0 );
+ S_UINT32 iNewSize = S_UINT32( iCurSize ) + S_UINT32( GrowSize(iCurSize) );
+ if( iNewSize.IsOverflow() )
+ {
+ return NULL;
+ }
+ p = pHeap->Realloc(pPtr, iNewSize.Value(), iCurSize);
+ if (p == NULL)
+ {
+ return NULL;
+ }
+
+ memset((BYTE*)p+iCurSize, 0, GrowSize(iCurSize));
+ return (pPtr = (BYTE *)p);
+ }
+
+ // A hashtable may recycle memory. We need to zero it out again.
+ static void Clean(BYTE * pData, int iSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ memset(pData, 0, iSize);
+ }
+#endif // DACCESS_COMPILE
+
+ static int RoundSize(int iSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (iSize);
+ }
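+ // GrowSize returns the number of additional bytes to allocate: one and a half times the
+ // current size, with a floor of 256. For example, GrowSize(11) == 256 and GrowSize(1000) == 1500.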
+ static int GrowSize(int iCurSize)
+ {
+ LIMITED_METHOD_CONTRACT;
+ int newSize = (3 * iCurSize) / 2;
+ return (newSize < 256) ? 256 : newSize;
+ }
+};
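+
+// A minimal usage sketch (the name "SomeDebuggerTable" is purely illustrative): the concrete tables
+// below, e.g. DebuggerPendingFuncEvalTable and DebuggerModuleTable, derive from CHashTableAndData
+// parameterized over this allocator so that their backing memory comes from the interop-safe heap:
+//
+//     class SomeDebuggerTable : private CHashTableAndData<CNewZeroData> { /* ... */ };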
+
+class DebuggerPendingFuncEvalTable : private CHashTableAndData<CNewZeroData>
+{
+ private:
+
+ BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#if defined(DACCESS_COMPILE)
+ // This function hasn't been tested yet in the DAC build. Make sure the DACization is correct.
+ DacNotImpl();
+#endif // DACCESS_COMPILE
+
+ Thread * pThread1 = reinterpret_cast<Thread *>(k1);
+ Thread * pThread2 = dac_cast<PTR_DebuggerPendingFuncEval>(const_cast<HASHENTRY *>(pc2))->pThread;
+
+ return (pThread1 != pThread2);
+ }
+
+ ULONG HASH(Thread* pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ULONG)((SIZE_T)pThread); // only use low 32-bits if 64-bit
+ }
+
+
+ SIZE_T KEY(Thread * pThread)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (SIZE_T)pThread;
+ }
+
+ public:
+
+#ifndef DACCESS_COMPILE
+ DebuggerPendingFuncEvalTable() : CHashTableAndData<CNewZeroData>(11)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ NewInit(11, sizeof(DebuggerPendingFuncEval), 11);
+ }
+
+ void AddPendingEval(Thread *pThread, DebuggerEval *pDE)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE((pThread != NULL) && (pDE != NULL));
+
+ DebuggerPendingFuncEval *pfe = (DebuggerPendingFuncEval*)Add(HASH(pThread));
+ pfe->pThread = pThread;
+ pfe->pDE = pDE;
+ }
+
+ void RemovePendingEval(Thread* pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(pThread != NULL);
+
+ DebuggerPendingFuncEval *entry = (DebuggerPendingFuncEval*)Find(HASH(pThread), KEY(pThread));
+ Delete(HASH(pThread), (HASHENTRY*)entry);
+ }
+
+#endif // #ifndef DACCESS_COMPILE
+
+ DebuggerPendingFuncEval *GetPendingEval(Thread* pThread)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ DebuggerPendingFuncEval *entry = (DebuggerPendingFuncEval*)Find(HASH(pThread), KEY(pThread));
+ return entry;
+ }
+};
+
+struct DebuggerModuleEntry
+{
+ FREEHASHENTRY entry;
+ PTR_DebuggerModule module;
+};
+
+typedef DPTR(struct DebuggerModuleEntry) PTR_DebuggerModuleEntry;
+
+class DebuggerModuleTable : private CHashTableAndData<CNewZeroData>
+{
+ private:
+
+ BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#if defined(DACCESS_COMPILE)
+ // This function hasn't been tested yet in the DAC build. Make sure the DACization is correct.
+ DacNotImpl();
+#endif // DACCESS_COMPILE
+
+ Module * pModule1 = reinterpret_cast<Module *>(k1);
+ Module * pModule2 =
+ dac_cast<PTR_DebuggerModuleEntry>(const_cast<HASHENTRY *>(pc2))->module->GetRuntimeModule();
+
+ return (pModule1 != pModule2);
+ }
+
+ ULONG HASH(Module* module)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (ULONG)((SIZE_T)module); // only use low 32-bits if 64-bit
+ }
+
+ SIZE_T KEY(Module * pModule)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (SIZE_T)pModule;
+ }
+
+#ifdef _DEBUG
+ bool ThreadHoldsLock();
+#endif
+
+public:
+
+#ifndef DACCESS_COMPILE
+
+ DebuggerModuleTable();
+ ~DebuggerModuleTable();
+
+ void AddModule(DebuggerModule *module);
+
+ void RemoveModule(Module* module, AppDomain *pAppDomain);
+
+
+ void Clear();
+
+ //
+ // RemoveModules removes any module loaded into the given appdomain from the hash. This is used when we send an
+ // ExitAppdomain event to ensure that there are no leftover modules in the hash. This can happen when we have shared
+ // modules that aren't properly accounted for in the CLR. We miss sending UnloadModule events for those modules, so
+ // we clean them up with this method.
+ //
+ void RemoveModules(AppDomain *pAppDomain);
+#endif // #ifndef DACCESS_COMPILE
+
+ DebuggerModule *GetModule(Module* module);
+
+ // We should never look for a NULL Module *
+ DebuggerModule *GetModule(Module* module, AppDomain* pAppDomain);
+ DebuggerModule *GetFirstModule(HASHFIND *info);
+ DebuggerModule *GetNextModule(HASHFIND *info);
+};
+
+// struct DebuggerMethodInfoKey: Key for each of the method info hash table entries.
+// Module * pModule: This and token make up the key
+// mdMethodDef token: This and pModule make up the key
+//
+// Note: This is used for hashing, so the structure must be totally blittable.
+typedef DPTR(struct DebuggerMethodInfoKey) PTR_DebuggerMethodInfoKey;
+struct DebuggerMethodInfoKey
+{
+ PTR_Module pModule;
+ mdMethodDef token;
+} ;
+
+// struct DebuggerMethodInfoEntry: Entry for the method info hash table.
+// FREEHASHENTRY entry: Needed for use by the hash table
+// DebuggerMethodInfo * mi: The actual DebuggerMethodInfo to
+//          hash. Note that DMIs are hashed by {Module*, mdMethodDef} (see DebuggerMethodInfoKey).
+typedef DPTR(struct DebuggerMethodInfoEntry) PTR_DebuggerMethodInfoEntry;
+struct DebuggerMethodInfoEntry
+{
+ FREEHASHENTRY entry;
+ DebuggerMethodInfoKey key;
+ SIZE_T nVersion;
+ SIZE_T nVersionLastRemapped;
+ PTR_DebuggerMethodInfo mi;
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+};
+
+// class DebuggerMethodInfoTable: Hash table to hold all the non-JIT related
+//    info for each method we see. The JIT infos live in a separate table
+// keyed by MethodDescs - there may be multiple
+// JITted realizations of each MethodDef, e.g. under different generic
+// assumptions. Hangs off of the Debugger object.
+// INVARIANT: There is only one DebuggerMethodInfo per method
+//    in the table. Note that DMIs are hashed by {Module*, mdMethodDef}.
+//
+class DebuggerMethodInfoTable : private CHashTableAndData<CNewZeroData>
+{
+ VPTR_BASE_CONCRETE_VTABLE_CLASS(DebuggerMethodInfoTable);
+
+ private:
+ BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // This is the inverse of the KEY() function.
+ DebuggerMethodInfoKey * pDjik = reinterpret_cast<DebuggerMethodInfoKey *>(k1);
+
+ DebuggerMethodInfoEntry * pDjie = dac_cast<PTR_DebuggerMethodInfoEntry>(const_cast<HASHENTRY *>(pc2));
+
+ return (pDjik->pModule != pDjie->key.pModule) ||
+ (pDjik->token != pDjie->key.token);
+ }
+
+ ULONG HASH(DebuggerMethodInfoKey* pDjik)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return HashPtr( pDjik->token, pDjik->pModule );
+ }
+
+ SIZE_T KEY(DebuggerMethodInfoKey * pDjik)
+ {
+ // This is casting a host pointer to a SIZE_T. So that key is restricted to the host address space.
+ // This key is just passed to Cmp(), which will cast it back to a DebuggerMethodInfoKey*.
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (SIZE_T)pDjik;
+ }
+
+//#define _DEBUG_DMI_TABLE
+
+#ifdef _DEBUG_DMI_TABLE
+public:
+ ULONG CheckDmiTable();
+
+#define CHECK_DMI_TABLE (CheckDmiTable())
+#define CHECK_DMI_TABLE_DEBUGGER (m_pMethodInfos->CheckDmiTable())
+
+#else
+
+#define CHECK_DMI_TABLE
+#define CHECK_DMI_TABLE_DEBUGGER
+
+#endif // _DEBUG_DMI_TABLE
+
+ public:
+
+#ifndef DACCESS_COMPILE
+
+ DebuggerMethodInfoTable();
+
+ HRESULT AddMethodInfo(Module *pModule,
+ mdMethodDef token,
+ DebuggerMethodInfo *mi);
+
+ HRESULT OverwriteMethodInfo(Module *pModule,
+ mdMethodDef token,
+ DebuggerMethodInfo *mi,
+ BOOL fOnlyIfNull);
+
+ // pModule is being unloaded - remove any entries that belong to it. Why?
+ // (a) Correctness: the module can be reloaded at the same address,
+ // which will cause accidental matches with our hashtable (indexed by
+ //     {Module*, mdMethodDef})
+ // (b) Perf: don't waste the memory!
+ void ClearMethodsOfModule(Module *pModule);
+ void DeleteEntryDMI(DebuggerMethodInfoEntry *entry);
+
+#endif // #ifndef DACCESS_COMPILE
+
+ DebuggerMethodInfo *GetMethodInfo(Module *pModule, mdMethodDef token);
+ DebuggerMethodInfo *GetFirstMethodInfo(HASHFIND *info);
+ DebuggerMethodInfo *GetNextMethodInfo(HASHFIND *info);
+
+#ifdef DACCESS_COMPILE
+ void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+#endif
+};
+
+/* ------------------------------------------------------------------------ *
+ * DebuggerEval class
+ *
+ * Note that arguments get passed in a block allocated when
+ * the func-eval is set up. The setup phase passes the total count of arguments.
+ *
+ * In some situations type arguments must also be passed, e.g.
+ * when performing a "newarr" operation or calling a generic function with a
+ * "funceval". In the setup phase we pass a count of the number of
+ * nodes in the "flattened" type expressions for the type arguments, if any.
+ * e.g. for calls to non-generic code this is 0.
+ * - for "newobj List<int>" this is 1: there is one type argument "int".
+ * - for "newobj Dict<string,int>" this is 2: there are two
+ * type arguments "string" and "int".
+ * - for "newobj Dict<string,List<int>>" this is 3: there are two
+ *      type arguments but the second contains two nodes (one for List and one for int).
+ * The type argument will get placed in the allocated argument block,
+ * the order being determined by the order they occur in the tree, i.e.
+ * left-to-right, top-to-bottom in the type expressions tree, e.g. for
+ * type arguments <string,List<int>> you get string followed by List followed by int.
+ * ------------------------------------------------------------------------ */
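+
+/* Illustrative layout of the m_argData block (a sketch inferred from the GetTypeArgData() and
+ * GetArgData() accessors below):
+ *
+ *    [ DebuggerIPCE_TypeArgData     x m_genericArgsNodeCount ]   <- flattened type-argument nodes, if any
+ *    [ DebuggerIPCE_FuncEvalArgData x m_argCount             ]   <- the actual arguments
+ *
+ * For DB_IPCE_FET_NEW_STRING the block instead holds the characters of the string to create
+ * (see GetNewStringArgData()).
+ */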
+
+class DebuggerEval
+{
+public:
+
+ //
+ // Used as a bit field.
+ //
+ enum FUNC_EVAL_ABORT_TYPE
+ {
+ FE_ABORT_NONE = 0,
+ FE_ABORT_NORMAL = 1,
+ FE_ABORT_RUDE = 2
+ };
+
+ // Note: this first field must be big enough to hold a breakpoint
+ // instruction, and it MUST be the first field. (This
+ // is asserted in debugger.cpp)
+ BYTE m_breakpointInstruction[CORDbg_BREAK_INSTRUCTION_SIZE];
+ T_CONTEXT m_context;
+ Thread *m_thread;
+ DebuggerIPCE_FuncEvalType m_evalType;
+ mdMethodDef m_methodToken;
+ mdTypeDef m_classToken;
+ ADID m_appDomainId; // Safe even if AD unloaded
+ PTR_DebuggerModule m_debuggerModule; // Only valid if AD is still around
+ RSPTR_CORDBEVAL m_funcEvalKey;
+ bool m_successful; // Did the eval complete successfully
+ Debugger::AreValueTypesBoxed m_retValueBoxing; // Is the return value boxed?
+ unsigned int m_argCount;
+ unsigned int m_genericArgsCount;
+ unsigned int m_genericArgsNodeCount;
+ SIZE_T m_stringSize;
+ BYTE *m_argData;
+ MethodDesc *m_md;
+ PCODE m_targetCodeAddr;
+ INT64 m_result;
+ TypeHandle m_resultType;
+ SIZE_T m_arrayRank;
+ FUNC_EVAL_ABORT_TYPE m_aborting; // Has an abort been requested, and what type.
+ bool m_aborted; // Was this eval aborted
+ bool m_completed; // Is the eval complete - successfully or by aborting
+ bool m_evalDuringException;
+ bool m_rethrowAbortException;
+ Thread::ThreadAbortRequester m_requester; // For aborts, what kind?
+ VMPTR_OBJECTHANDLE m_vmObjectHandle;
+ TypeHandle m_ownerTypeHandle;
+
+ DebuggerEval(T_CONTEXT * pContext, DebuggerIPCE_FuncEvalInfo * pEvalInfo, bool fInException);
+
+ // This constructor is only used when setting up an eval to re-abort a thread.
+ DebuggerEval(T_CONTEXT * pContext, Thread * pThread, Thread::ThreadAbortRequester requester);
+
+ bool Init()
+ {
+ _ASSERTE(DbgIsExecutable(&m_breakpointInstruction, sizeof(m_breakpointInstruction)));
+ return true;
+ }
+
+
+ // The m_argData buffer holds both the type arg data (for generics) and the main argument data.
+ //
+ // For DB_IPCE_FET_NEW_STRING it holds the data specifying the string to create.
+ DebuggerIPCE_TypeArgData *GetTypeArgData()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (DebuggerIPCE_TypeArgData *) (m_argData);
+ }
+
+ DebuggerIPCE_FuncEvalArgData *GetArgData()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (DebuggerIPCE_FuncEvalArgData*) (m_argData + m_genericArgsNodeCount * sizeof(DebuggerIPCE_TypeArgData));
+ }
+
+ WCHAR *GetNewStringArgData()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_evalType == DB_IPCE_FET_NEW_STRING);
+ return (WCHAR*)m_argData;
+ }
+
+ ~DebuggerEval()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ // Clean up any temporary buffers used to send the argument type information. These were allocated
+ // in response to a GET_BUFFER message.
+ DebuggerIPCE_FuncEvalArgData *argData = GetArgData();
+ for (unsigned int i = 0; i < m_argCount; i++)
+ {
+ if (argData[i].fullArgType != NULL)
+ {
+ _ASSERTE(g_pDebugger != NULL);
+ g_pDebugger->ReleaseRemoteBuffer((BYTE*)argData[i].fullArgType, true);
+ }
+ }
+
+ // Clean up the array of argument information. This was allocated as part of Func Eval setup.
+ if (m_argData)
+ {
+ DeleteInteropSafe(m_argData);
+ }
+
+#ifdef _DEBUG
+ // Set flags to strategic values in case we access deleted memory.
+ m_completed = false;
+ m_rethrowAbortException = true;
+#endif
+ }
+};
+
+/* ------------------------------------------------------------------------ *
+ * New/delete overrides to use the debugger's private heap
+ * ------------------------------------------------------------------------ */
+
+class InteropSafe {};
+#define interopsafe (*(InteropSafe*)NULL)
+
+class InteropSafeExecutable {};
+#define interopsafeEXEC (*(InteropSafeExecutable*)NULL)
+
+#ifndef DACCESS_COMPILE
+inline void * __cdecl operator new(size_t n, const InteropSafe&)
+{
+ CONTRACTL
+ {
+ THROWS; // throw on OOM
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pDebugger != NULL);
+ void *result = g_pDebugger->GetInteropSafeHeap()->Alloc((DWORD)n);
+ if (result == NULL) {
+ ThrowOutOfMemory();
+ }
+ return result;
+}
+
+inline void * __cdecl operator new[](size_t n, const InteropSafe&)
+{
+ CONTRACTL
+ {
+ THROWS; // throw on OOM
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+ _ASSERTE(g_pDebugger != NULL);
+ void *result = g_pDebugger->GetInteropSafeHeap()->Alloc((DWORD)n);
+ if (result == NULL) {
+ ThrowOutOfMemory();
+ }
+ return result;
+}
+
+inline void * __cdecl operator new(size_t n, const InteropSafe&, const NoThrow&)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pDebugger != NULL);
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow();
+ if (pHeap == NULL)
+ {
+ return NULL;
+ }
+ void *result = pHeap->Alloc((DWORD)n);
+ return result;
+}
+
+inline void * __cdecl operator new[](size_t n, const InteropSafe&, const NoThrow&)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pDebugger != NULL);
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow();
+ if (pHeap == NULL)
+ {
+ return NULL;
+ }
+ void *result = pHeap->Alloc((DWORD)n);
+ return result;
+}
+
+// Note: there is no C++ syntax for manually invoking this operator, but if a constructor throws an exception,
+// this delete operator is invoked automatically to destroy the object.
+inline void __cdecl operator delete(void *p, const InteropSafe&)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (p != NULL)
+ {
+ _ASSERTE(g_pDebugger != NULL);
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow();
+ _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting
+ pHeap->Free(p);
+ }
+}
+
+// Note: there is no C++ syntax for manually invoking this operator, but if a constructor throws an exception,
+// this delete operator is invoked automatically to destroy the object.
+inline void __cdecl operator delete[](void *p, const InteropSafe&)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (p != NULL)
+ {
+ _ASSERTE(g_pDebugger != NULL);
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow();
+ _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting
+
+ pHeap->Free(p);
+ }
+}
+
+//
+// Interop-safe delete to match the interop-safe operator new overloads above. There is no C++ syntax for actually
+// invoking those interop-safe delete operators above, so we use this method to accomplish the same thing.
+//
+template<class T> void DeleteInteropSafe(T *p)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Don't stop a thread that may hold the Interop-safe heap lock.
+ // It may be in preemptive mode, but it's still "inside" the CLR and so inside the "Can't-Stop-Region".
+ CantStopHolder hHolder;
+
+ if (p != NULL)
+ {
+ p->~T();
+
+ _ASSERTE(g_pDebugger != NULL);
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow();
+ _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting
+
+ pHeap->Free(p);
+ }
+}
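+
+// A minimal usage sketch of the interop-safe allocation helpers defined above ("Widget" is a
+// hypothetical type used only for illustration):
+//
+//     Widget * pWidget = new (interopsafe) Widget();   // allocated from the debugger's interop-safe heap
+//     ...
+//     DeleteInteropSafe(pWidget);                      // runs ~Widget() and frees back to the same heap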
+
+inline void * __cdecl operator new(size_t n, const InteropSafeExecutable&)
+{
+ CONTRACTL
+ {
+ THROWS; // throw on OOM
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pDebugger != NULL);
+ void *result = g_pDebugger->GetInteropSafeExecutableHeap()->Alloc((DWORD)n);
+ if (result == NULL) {
+ ThrowOutOfMemory();
+ }
+ return result;
+}
+
+inline void * __cdecl operator new(size_t n, const InteropSafeExecutable&, const NoThrow&)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pDebugger != NULL);
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeExecutableHeap_NoThrow();
+ if (pHeap == NULL)
+ {
+ return NULL;
+ }
+ void *result = pHeap->Alloc((DWORD)n);
+ return result;
+}
+
+// Note: there is no C++ syntax for manually invoking this operator, but if a constructor throws an exception,
+// this delete operator is invoked automatically to destroy the object.
+inline void __cdecl operator delete(void *p, const InteropSafeExecutable&)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if (p != NULL)
+ {
+ _ASSERTE(g_pDebugger != NULL);
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeExecutableHeap_NoThrow();
+ _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting
+ pHeap->Free(p);
+ }
+}
+
+//
+// Interop-safe delete to match the interop-safe operator new overloads above. There is no C++ syntax for actually
+// invoking those interop-safe delete operators above, so we use this method to accomplish the same thing.
+//
+template<class T> void DeleteInteropSafeExecutable(T *p)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Don't stop a thread that may hold the Interop-safe heap lock.
+ // It may be in preemptive mode, but it's still "inside" the CLR and so inside the "Can't-Stop-Region".
+ CantStopHolder hHolder;
+
+ if (p != NULL)
+ {
+ p->~T();
+
+ _ASSERTE(g_pDebugger != NULL);
+ DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeExecutableHeap_NoThrow();
+ _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting
+
+ pHeap->Free(p);
+ }
+}
+#endif // DACCESS_COMPILE
+
+
+#if _DEBUG
+#define DBG_RUNTIME_MAX ((DB_IPCE_RUNTIME_LAST&0xff)+1)
+#define DBG_DEBUGGER_MAX ((DB_IPCE_DEBUGGER_LAST&0xff)+1)
+
+#define DbgLog(event) DbgLogHelper(event)
+void DbgLogHelper(DebuggerIPCEventType event);
+#else
+#define DbgLog(event)
+#endif // _DEBUG
+
+//-----------------------------------------------------------------------------
+// Helpers for cleanup
+// These are various utility functions, mainly where we factor out code.
+//-----------------------------------------------------------------------------
+void GetPidDecoratedName(__out_z __in_ecount(cBufSizeInChars) WCHAR * pBuf,
+ int cBufSizeInChars,
+ const WCHAR * pPrefix);
+
+// Specify type of Win32 event
+enum EEventResetType {
+ kManualResetEvent = TRUE,
+ kAutoResetEvent = FALSE
+};
+
+HANDLE CreateWin32EventOrThrow(
+ LPSECURITY_ATTRIBUTES lpEventAttributes,
+ EEventResetType eType,
+ BOOL bInitialState
+);
+
+HANDLE OpenWin32EventOrThrow(
+ DWORD dwDesiredAccess,
+ BOOL bInheritHandle,
+ LPCWSTR lpName
+);
+
+// @todo - should this be moved into where we defined IPCWriterInterface?
+// Holder for security Attribute
+// Old code:
+// hr = g_pIPCManagerInterface->GetSecurityAttributes(GetCurrentProcessId(), &pSA);
+// .... foo(pSa)...
+// g_pIPCManagerInterface->DestroySecurityAttributes(pSA);
+//
+// new code:
+// {
+// SAHolder x(g_pIPCManagerInterface, GetCurrentProcessId());
+// .... foo(x.GetSA()) ..
+// } // calls dtor
+class IPCHostSecurityAttributeHolder
+{
+public:
+ IPCHostSecurityAttributeHolder(DWORD pid);
+ ~IPCHostSecurityAttributeHolder();
+
+ SECURITY_ATTRIBUTES * GetHostSA();
+
+protected:
+ SECURITY_ATTRIBUTES *m_pSA; // the resource we're protecting.
+};
+
+#define SENDIPCEVENT_RAW_BEGIN_EX(pDbgLockHolder, gcxStmt) \
+ { \
+ Debugger::DebuggerLockHolder *__pDbgLockHolder = pDbgLockHolder; \
+ gcxStmt; \
+ g_pDebugger->LockForEventSending(__pDbgLockHolder);
+
+#define SENDIPCEVENT_RAW_END_EX \
+ g_pDebugger->UnlockFromEventSending(__pDbgLockHolder); \
+ }
+
+#define SENDIPCEVENT_RAW_BEGIN(pDbgLockHolder) \
+ SENDIPCEVENT_RAW_BEGIN_EX(pDbgLockHolder, GCX_PREEMP_EEINTERFACE_TOGGLE_COND(CORDebuggerAttached()))
+
+#define SENDIPCEVENT_RAW_END SENDIPCEVENT_RAW_END_EX
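+
+// Illustrative (assumed) usage of the raw macros; most callers should use the suspend-aware
+// SENDIPCEVENT_BEGIN / SENDIPCEVENT_END macros below instead (see the sample usage later in this file):
+//
+//     Debugger::DebuggerLockHolder dbgLockHolder(g_pDebugger, FALSE);
+//     SENDIPCEVENT_RAW_BEGIN(&dbgLockHolder);
+//     ... send IPC events while holding the lock for event sending ...
+//     SENDIPCEVENT_RAW_END;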
+
+// Suspend-aware SENDIPCEVENT macros:
+// Check whether __thread has been suspended by the debugger via SetDebugState().
+// If this thread has been suspended, it shouldn't send any event to the RS because the
+// debugger may not be expecting it. Instead, just leave the lock and retry.
+// When we leave, we'll enter coop mode first and get suspended if a suspension is in progress.
+// Afterwards, we'll transition back into preemptive mode, and we'll block because this thread
+// has been suspended by the debugger (see code:Thread::RareEnablePreemptiveGC).
+#define SENDIPCEVENT_BEGIN_EX(pDebugger, thread, gcxStmt) \
+ { \
+ FireEtwDebugIPCEventStart(); \
+ bool __fRetry = true; \
+ do \
+ { \
+ { \
+ Debugger::DebuggerLockHolder __dbgLockHolder(pDebugger, FALSE); \
+ Debugger::DebuggerLockHolder *__pDbgLockHolder = &__dbgLockHolder; \
+ gcxStmt; \
+ g_pDebugger->LockForEventSending(__pDbgLockHolder); \
+ /* Check if the thread has been suspended by the debugger via SetDebugState(). */ \
+ if (thread != NULL && thread->HasThreadStateNC(Thread::TSNC_DebuggerUserSuspend)) \
+ { \
+ /* Just leave the lock and retry (see comment above for explanation) */ \
+ } \
+ else \
+ { \
+ __fRetry = false; \
+
+#define SENDIPCEVENT_END_EX \
+ ; \
+ } \
+ g_pDebugger->UnlockFromEventSending(__pDbgLockHolder); \
+ } /* ~gcxStmt & ~DebuggerLockHolder */ \
+ } while (__fRetry); \
+ FireEtwDebugIPCEventEnd(); \
+ }
+
+
+// The typical SENDIPCEVENT - toggles the GC mode...
+#define SENDIPCEVENT_BEGIN(pDebugger, thread) \
+ SENDIPCEVENT_BEGIN_EX(pDebugger, thread, GCX_PREEMP_EEINTERFACE_TOGGLE_IFTHREAD_COND(CORDebuggerAttached()))
+
+// Convenience macro to match SENDIPCEVENT_BEGIN
+#define SENDIPCEVENT_END SENDIPCEVENT_END_EX
+
+
+// Use this if you need to access the DebuggerLockHolder set up by SENDIPCEVENT_BEGIN.
+// This is valid only between the SENDIPCEVENT_BEGIN / SENDIPCEVENT_END macros
+#define SENDIPCEVENT_PtrDbgLockHolder __pDbgLockHolder
+
+
+// Common contract for sending events.
+// Used between SENDIPCEVENT_BEGIN & _END.
+//
+// Can't GC trigger b/c if we're syncing we'll deadlock:
+// - We'll block at the GC toggle (b/c we're syncing).
+// - But we're holding the LockForEventSending "lock", so we'll block the helper trying to send a
+// SuspendComplete
+//
+// @todo- we could also assert that:
+// - m_tidLockedForEventSending = GetCurrentThreadId();
+#define SENDEVENT_CONTRACT_ITEMS \
+ GC_NOTRIGGER; \
+ MODE_PREEMPTIVE; \
+ PRECONDITION(g_pDebugger->ThreadHoldsLock()); \
+ PRECONDITION(!g_pDebugger->IsStopped()); \
+
+
+//-----------------------------------------------------------------------------
+// Sample usage for sending IPC _Notification_ events.
+// This is different from SendIPCReply (which is used to reply to events
+// initiated by the RS).
+//-----------------------------------------------------------------------------
+
+// Thread *pThread = g_pEEInterface->GetThread();
+// SENDIPCEVENT_BEGIN(g_pDebugger, pThread); // or use "this" if inside a Debugger method
+// _ASSERTE(ThreadHoldsLock()); // we now hold the debugger lock.
+// // debugger may have detached while we were blocked above.
+//
+// if (CORDebuggerAttached()) {
+// // Send as many IPC events as we wish.
+// SendIPCEvent(....);
+// SendIPCEvent(....);
+// SendIPCEvent(....);
+//
+// if (we sent an event) {
+// TrapAllRuntimeThreads();
+// }
+// }
+//
+// // We block here while the debugger responds to the event.
+// SENDIPCEVENT_END;
+
+// Or if we just want to send a single IPC event and block, we can do this:
+//
+// < ... Init IPC Event ...>
+// SendSimpleIPCEventAndBlock(); <-- this will block
+//
+// Note we don't have to call SENDIPCEVENT_BEGIN / END in this case.
+
+// @todo - further potential cleanup to the IPC sending:
+// - Make SendIPCEvent + TrapAllRuntimeThreads check for CORDebuggerAttached() so that we
+// can always call them after SENDIPCEVENT_BEGIN
+// - Assert that SendIPCEvent is only called between a Begin/End pair
+// - Count if we actually send any IPC events between a Begin/End pair, and then have
+//   SENDIPCEVENT_END call TrapAllRuntimeThreads automatically for us.
+
+
+// Include all of the inline stuff now.
+#include "debugger.inl"
+
+
+//
+//
+//
+// The below contract defines should only be used (A) if they apply, and (B) if they are the LEAST
+// definitive contract for the function you are contracting. The below defines represent the baseline contract
+// for each case.
+//
+// e.g. If a function FOO() throws, always, you should use THROWS, not any of the below.
+//
+//
+//
+#if _DEBUG
+
+#define MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT \
+ if (!m_pRCThread->IsRCThreadReady()) { THROWS; } else { NOTHROW; }
+
+#define MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT \
+ if (!m_pRCThread->IsRCThreadReady() || (GetThread() != NULL)) { GC_TRIGGERS; } else { GC_NOTRIGGER; }
+
+#define GC_TRIGGERS_FROM_GETJITINFO if (GetThreadNULLOk() != NULL) { GC_TRIGGERS; } else { GC_NOTRIGGER; }
+
+//
+// The DebuggerDataLock lock is UNSAFE_ANYMODE, which means that we cannot
+// take a GC while someone is holding it. Unfortunately this means that
+// we cannot contract for a "possible" GC trigger statically, and must
+// rely on runtime coverage to find any code path that may cause a GC.
+//
+#define CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT WRAPPER(GC_TRIGGERS)
+
+#else
+
+#define MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT
+#define MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT
+#define CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT
+
+#define GC_TRIGGERS_FROM_GETJITINFO
+
+#endif
+
+// Returns true if the specified IL offset has a special meaning (eg. prolog, etc.)
+bool DbgIsSpecialILOffset(DWORD offset);
+
+#if defined(_WIN64) || defined(_TARGET_ARM_)
+void FixupDispatcherContext(T_DISPATCHER_CONTEXT* pDispatcherContext, T_CONTEXT* pContext, T_CONTEXT* pOriginalContext, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine = NULL);
+#endif
+
+#endif /* DEBUGGER_H_ */
+
diff --git a/src/debug/ee/debugger.inl b/src/debug/ee/debugger.inl
new file mode 100644
index 0000000000..8d24882f00
--- /dev/null
+++ b/src/debug/ee/debugger.inl
@@ -0,0 +1,304 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: debugger.inl
+//
+
+//
+// Inline definitions for the Left-Side of the CLR debugging services
+// This is logically part of the header file.
+//
+//*****************************************************************************
+
+#ifndef DEBUGGER_INL_
+#define DEBUGGER_INL_
+
+//=============================================================================
+// Inlined methods for Debugger.
+//=============================================================================
+inline bool Debugger::HasLazyData()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pLazyData != NULL);
+}
+inline RCThreadLazyInit *Debugger::GetRCThreadLazyData()
+{
+ LIMITED_METHOD_CONTRACT;
+ return &(GetLazyData()->m_RCThread);
+}
+
+inline DebuggerLazyInit *Debugger::GetLazyData()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(m_pLazyData != NULL);
+ return m_pLazyData;
+}
+
+inline DebuggerModuleTable * Debugger::GetModuleTable()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pModules;
+}
+
+
+//=============================================================================
+// Inlined methods for DebuggerModule.
+//=============================================================================
+
+
+//-----------------------------------------------------------------------------
+// Constructor for a Debugger-Module.
+// @dbgtodo inspection - get rid of this entire class as we move things out-of-proc.
+//-----------------------------------------------------------------------------
+inline DebuggerModule::DebuggerModule(Module * pRuntimeModule,
+ DomainFile * pDomainFile,
+ AppDomain * pAppDomain) :
+ m_enableClassLoadCallbacks(FALSE),
+ m_pPrimaryModule(NULL),
+ m_pRuntimeModule(pRuntimeModule),
+ m_pRuntimeDomainFile(pDomainFile),
+ m_pAppDomain(pAppDomain)
+{
+ LOG((LF_CORDB,LL_INFO10000, "DM::DM this:0x%x Module:0x%x DF:0x%x AD:0x%x\n",
+ this, pRuntimeModule, pDomainFile, pAppDomain));
+
+ // Pick a primary module.
+ // Arguably, this could be in DebuggerModuleTable::AddModule
+ PickPrimaryModule();
+
+
+ // Do we have any optimized code?
+ DWORD dwDebugBits = pRuntimeModule->GetDebuggerInfoBits();
+ m_fHasOptimizedCode = CORDebuggerAllowJITOpts(dwDebugBits);
+
+ // Dynamic modules must receive ClassLoad callbacks in order to receive metadata updates as the module
+ // evolves. So we force this on here and refuse to change it for all dynamic modules.
+ if (pRuntimeModule->IsReflection())
+ {
+ EnableClassLoadCallbacks(TRUE);
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Returns true if we have any optimized code in the module.
+//
+// Notes:
+// JMC-probes aren't emitted in optimized code.
+// <TODO> Life would be nice if the Jit tracked this. </TODO>
+//-----------------------------------------------------------------------------
+inline bool DebuggerModule::HasAnyOptimizedCode()
+{
+ LIMITED_METHOD_CONTRACT;
+ Module * pModule = this->GetPrimaryModule()->GetRuntimeModule();
+ DWORD dwDebugBits = pModule->GetDebuggerInfoBits();
+ return CORDebuggerAllowJITOpts(dwDebugBits);
+}
+
+//-----------------------------------------------------------------------------
+// Return true if we've enabled class-load callbacks.
+//-----------------------------------------------------------------------------
+inline BOOL DebuggerModule::ClassLoadCallbacksEnabled(void)
+{
+ return m_enableClassLoadCallbacks;
+}
+
+//-----------------------------------------------------------------------------
+// Set whether we should enable class-load callbacks for this module.
+//-----------------------------------------------------------------------------
+inline void DebuggerModule::EnableClassLoadCallbacks(BOOL f)
+{
+ if (m_enableClassLoadCallbacks != f)
+ {
+ if (f)
+ {
+ _ASSERTE(g_pDebugger != NULL);
+ g_pDebugger->IncrementClassLoadCallbackCount();
+ }
+ else
+ {
+ _ASSERTE(g_pDebugger != NULL);
+ g_pDebugger->DecrementClassLoadCallbackCount();
+ }
+
+ m_enableClassLoadCallbacks = f;
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Return the appdomain that this module exists in.
+//-----------------------------------------------------------------------------
+inline AppDomain* DebuggerModule::GetAppDomain()
+{
+ return m_pAppDomain;
+}
+
+//-----------------------------------------------------------------------------
+// Return the EE module that this module corresponds to.
+//-----------------------------------------------------------------------------
+inline Module * DebuggerModule::GetRuntimeModule()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pRuntimeModule;
+}
+
+//-----------------------------------------------------------------------------
+// <TODO> (8/12/2002)
+// Currently we create a new DebuggerModules for each appdomain a shared
+// module lives in. We then pretend there aren't any shared modules.
+// This is bad. We need to move away from this.
+//  Once we stop lying, then every module will be its own PrimaryModule. :)
+//
+// Currently, Module* is 1:n w/ DebuggerModule.
+// We add a notion of PrimaryModule so that:
+// Module* is 1:1 w/ DebuggerModule::GetPrimaryModule();
+// This should help transition towards exposing shared modules.
+// If the Runtime module is shared, then this gives a common DM.
+// If the runtime module is not shared, then this is an identity function.
+// </TODO>
+//-----------------------------------------------------------------------------
+inline DebuggerModule * DebuggerModule::GetPrimaryModule()
+{
+ _ASSERTE(m_pPrimaryModule != NULL);
+ return m_pPrimaryModule;
+}
+
+//-----------------------------------------------------------------------------
+// This is called by DebuggerModuleTable to set our primary module.
+//-----------------------------------------------------------------------------
+inline void DebuggerModule::SetPrimaryModule(DebuggerModule * pPrimary)
+{
+ _ASSERTE(pPrimary != NULL);
+ // Our primary module must by definition refer to the same runtime module as us
+ _ASSERTE(pPrimary->GetRuntimeModule() == this->GetRuntimeModule());
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DM::SetPrimaryModule - this=%p, pPrimary=%p\n", this, pPrimary));
+ m_pPrimaryModule = pPrimary;
+}
+
+inline DebuggerEval * FuncEvalFrame::GetDebuggerEval()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pDebuggerEval;
+}
+
+inline unsigned FuncEvalFrame::GetFrameAttribs(void)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (GetDebuggerEval()->m_evalDuringException)
+ {
+ return FRAME_ATTR_NONE;
+ }
+ else
+ {
+ return FRAME_ATTR_RESUMABLE; // Treat the next frame as the top frame.
+ }
+}
+
+inline TADDR FuncEvalFrame::GetReturnAddressPtr()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ if (GetDebuggerEval()->m_evalDuringException)
+ {
+ return NULL;
+ }
+ else
+ {
+ return PTR_HOST_MEMBER_TADDR(FuncEvalFrame, this, m_ReturnAddress);
+ }
+}
+
+//
+// This updates the register display for a FuncEvalFrame.
+//
+inline void FuncEvalFrame::UpdateRegDisplay(const PREGDISPLAY pRD)
+{
+ SUPPORTS_DAC;
+ DebuggerEval * pDE = GetDebuggerEval();
+
+ // No context to update if we're doing a func eval from within exception processing.
+ if (pDE->m_evalDuringException)
+ {
+ return;
+ }
+
+#if !defined(_WIN64)
+ // Reset pContext; it's only valid for active (top-most) frame.
+ pRD->pContext = NULL;
+#endif // !_WIN64
+
+
+#ifdef _TARGET_X86_
+ // Update all registers in the reg display from the CONTEXT we stored when the thread was hijacked for this func
+ // eval. We have to update all registers, not just the callee saved registers, because we can hijack a thread at any
+ // point for a func eval, not just at a call site.
+ pRD->pEdi = &(pDE->m_context.Edi);
+ pRD->pEsi = &(pDE->m_context.Esi);
+ pRD->pEbx = &(pDE->m_context.Ebx);
+ pRD->pEdx = &(pDE->m_context.Edx);
+ pRD->pEcx = &(pDE->m_context.Ecx);
+ pRD->pEax = &(pDE->m_context.Eax);
+ pRD->pEbp = &(pDE->m_context.Ebp);
+ pRD->Esp = (DWORD)GetSP(&pDE->m_context);
+ pRD->PCTAddr = GetReturnAddressPtr();
+ pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr);
+
+#elif defined(_TARGET_AMD64_)
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this flag. This is only temporary.
+
+ memcpy(pRD->pCurrentContext, &(pDE->m_context), sizeof(CONTEXT));
+
+ pRD->pCurrentContextPointers->Rax = &(pDE->m_context.Rax);
+ pRD->pCurrentContextPointers->Rcx = &(pDE->m_context.Rcx);
+ pRD->pCurrentContextPointers->Rdx = &(pDE->m_context.Rdx);
+ pRD->pCurrentContextPointers->R8 = &(pDE->m_context.R8);
+ pRD->pCurrentContextPointers->R9 = &(pDE->m_context.R9);
+ pRD->pCurrentContextPointers->R10 = &(pDE->m_context.R10);
+ pRD->pCurrentContextPointers->R11 = &(pDE->m_context.R11);
+
+ pRD->pCurrentContextPointers->Rbx = &(pDE->m_context.Rbx);
+ pRD->pCurrentContextPointers->Rsi = &(pDE->m_context.Rsi);
+ pRD->pCurrentContextPointers->Rdi = &(pDE->m_context.Rdi);
+ pRD->pCurrentContextPointers->Rbp = &(pDE->m_context.Rbp);
+ pRD->pCurrentContextPointers->R12 = &(pDE->m_context.R12);
+ pRD->pCurrentContextPointers->R13 = &(pDE->m_context.R13);
+ pRD->pCurrentContextPointers->R14 = &(pDE->m_context.R14);
+ pRD->pCurrentContextPointers->R15 = &(pDE->m_context.R15);
+
+ // SyncRegDisplayToCurrentContext() sets the pRD->SP and pRD->ControlPC on AMD64.
+ SyncRegDisplayToCurrentContext(pRD);
+
+#elif defined(_TARGET_ARM_)
+ pRD->IsCallerContextValid = FALSE;
+ pRD->IsCallerSPValid = FALSE; // Don't add usage of this flag. This is only temporary.
+
+ memcpy(pRD->pCurrentContext, &(pDE->m_context), sizeof(T_CONTEXT));
+
+ pRD->pCurrentContextPointers->R4 = &(pDE->m_context.R4);
+ pRD->pCurrentContextPointers->R5 = &(pDE->m_context.R5);
+ pRD->pCurrentContextPointers->R6 = &(pDE->m_context.R6);
+ pRD->pCurrentContextPointers->R7 = &(pDE->m_context.R7);
+ pRD->pCurrentContextPointers->R8 = &(pDE->m_context.R8);
+ pRD->pCurrentContextPointers->R9 = &(pDE->m_context.R9);
+ pRD->pCurrentContextPointers->R10 = &(pDE->m_context.R10);
+ pRD->pCurrentContextPointers->R11 = &(pDE->m_context.R11);
+ pRD->pCurrentContextPointers->Lr = &(pDE->m_context.Lr);
+
+ pRD->volatileCurrContextPointers.R0 = &(pDE->m_context.R0);
+ pRD->volatileCurrContextPointers.R1 = &(pDE->m_context.R1);
+ pRD->volatileCurrContextPointers.R2 = &(pDE->m_context.R2);
+ pRD->volatileCurrContextPointers.R3 = &(pDE->m_context.R3);
+ pRD->volatileCurrContextPointers.R12 = &(pDE->m_context.R12);
+
+ SyncRegDisplayToCurrentContext(pRD);
+#else
+ PORTABILITY_ASSERT("FuncEvalFrame::UpdateRegDisplay is not implemented on this platform.");
+#endif
+}
+
+#endif // DEBUGGER_INL_
diff --git a/src/debug/ee/debuggermodule.cpp b/src/debug/ee/debuggermodule.cpp
new file mode 100644
index 0000000000..d46e507546
--- /dev/null
+++ b/src/debug/ee/debuggermodule.cpp
@@ -0,0 +1,445 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: DebuggerModule.cpp
+//
+
+//
+// Stuff for tracking DebuggerModules.
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+#include "../inc/common.h"
+#include "perflog.h"
+#include "eeconfig.h" // This is here even for retail & free builds...
+#include "vars.hpp"
+#include <limits.h>
+#include "ilformatter.h"
+#include "debuginfostore.h"
+
+
+/* ------------------------------------------------------------------------ *
+ * Debugger Module routines
+ * ------------------------------------------------------------------------ */
+
+// <TODO> (8/12/2002)
+// We need to stop lying to the debugger about not sharing Modules.
+// Primary Modules allow a transition to that. Once we stop lying,
+// then all modules will be their own Primary.
+// </TODO>
+// Select the primary module.
+// Primary Modules are selected DebuggerModules that map 1:1 w/ Module*.
+// If the runtime module is not shared, then we're our own Primary Module.
+// If the Runtime module is shared, the primary module is some specific instance.
+// Note that a domain-neutral module can be loaded into multiple domains without
+// being loaded into the default domain, and so there is no "primary module" as far
+// as the CLR is concerned - we just pick any one and call it primary.
+void DebuggerModule::PickPrimaryModule()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Debugger::DebuggerDataLockHolder ch(g_pDebugger);
+
+ LOG((LF_CORDB, LL_INFO100000, "DM::PickPrimaryModule, this=0x%p\n", this));
+
+ // We're our own primary module, unless something else proves otherwise.
+ // Note that we should be able to skip all of this if this module is not domain neutral
+ m_pPrimaryModule = this;
+
+ // This should be thread safe because creation of DebuggerModules
+ // is serialized.
+
+ // Look up our Runtime Module in the table. If another DebuggerModule for the same runtime
+ // module is already present (and is its own primary), then use it as our primary module.
+ DebuggerModuleTable * pTable = g_pDebugger->GetModuleTable();
+
+ // If the table doesn't exist yet, then we must be a primary module.
+ if (pTable == NULL)
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DM::PickPrimaryModule, this=0x%p, table not created yet\n", this));
+ return;
+ }
+
+ // Look through existing module list to find a common primary DebuggerModule
+ // for the given EE Module. We don't know what order we'll traverse in.
+
+ HASHFIND f;
+ for (DebuggerModule * m = pTable->GetFirstModule(&f);
+ m != NULL;
+ m = pTable->GetNextModule(&f))
+ {
+
+ if (m->GetRuntimeModule() == this->GetRuntimeModule())
+ {
+ // Make sure we're picking another primary module.
+ if (m->GetPrimaryModule() == m)
+ {
+ // If we find another one, it must be domain neutral
+ _ASSERTE( m_pRuntimeModule->GetAssembly()->IsDomainNeutral() );
+
+ m_pPrimaryModule = m;
+ LOG((LF_CORDB, LL_INFO100000, "DM::PickPrimaryModule, this=0x%p, primary=0x%p\n", this, m));
+ return;
+ }
+ }
+ } // end for
+
+ // If we got here, then this instance is a Primary Module.
+ LOG((LF_CORDB, LL_INFO100000, "DM::PickPrimaryModule, this=%p is first, primary.\n", this));
+}
+
+void DebuggerModule::SetCanChangeJitFlags(bool fCanChangeJitFlags)
+{
+ m_fCanChangeJitFlags = fCanChangeJitFlags;
+}
+
+#ifndef DACCESS_COMPILE
+
+
+DebuggerModuleTable::DebuggerModuleTable() : CHashTableAndData<CNewZeroData>(101)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ NewInit(101, sizeof(DebuggerModuleEntry), 101);
+}
+
+DebuggerModuleTable::~DebuggerModuleTable()
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(ThreadHoldsLock());
+ Clear();
+}
+
+
+#ifdef _DEBUG
+bool DebuggerModuleTable::ThreadHoldsLock()
+{
+ // In shutdown (g_fProcessDetach), the shutdown thread implicitly holds all locks.
+ return g_fProcessDetach || g_pDebugger->HasDebuggerDataLock();
+}
+#endif
+
+//
+// RemoveModules removes any module loaded into the given appdomain from the hash. This is used when we send an
+// ExitAppdomain event to ensure that there are no leftover modules in the hash. This can happen when we have shared
+// modules that aren't properly accounted for in the CLR. We miss sending UnloadModule events for those modules, so
+// we clean them up with this method.
+//
+void DebuggerModuleTable::RemoveModules(AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "DMT::RM removing all modules from AD 0x%08x\n", pAppDomain));
+
+ _ASSERTE(ThreadHoldsLock());
+
+ HASHFIND hf;
+ DebuggerModuleEntry *pDME = (DebuggerModuleEntry *) FindFirstEntry(&hf);
+
+ while (pDME != NULL)
+ {
+ DebuggerModule *pDM = pDME->module;
+
+ if (pDM->GetAppDomain() == pAppDomain)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DMT::RM removing DebuggerModule 0x%08x\n", pDM));
+
+ // Defer to the normal logic in RemoveModule for the actual removal. This accurately simulates what
+ // happens when we process an UnloadModule event.
+ RemoveModule(pDM->GetRuntimeModule(), pAppDomain);
+
+ // Start back at the first entry since we just modified the hash.
+ pDME = (DebuggerModuleEntry *) FindFirstEntry(&hf);
+ }
+ else
+ {
+ pDME = (DebuggerModuleEntry *) FindNextEntry(&hf);
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO1000, "DMT::RM done removing all modules from AD 0x%08x\n", pAppDomain));
+}
+
+void DebuggerModuleTable::Clear()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ThreadHoldsLock());
+
+ HASHFIND hf;
+ DebuggerModuleEntry *pDME;
+
+ pDME = (DebuggerModuleEntry *) FindFirstEntry(&hf);
+
+ while (pDME)
+ {
+ DebuggerModule *pDM = pDME->module;
+ Module *pEEM = pDM->GetRuntimeModule();
+
+ TRACE_FREE(pDME->module);
+ DeleteInteropSafe(pDM);
+ Delete(HASH(pEEM), (HASHENTRY *) pDME);
+
+ pDME = (DebuggerModuleEntry *) FindFirstEntry(&hf);
+ }
+
+ CHashTableAndData<CNewZeroData>::Clear();
+}
+
+void DebuggerModuleTable::AddModule(DebuggerModule *pModule)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ThreadHoldsLock());
+
+ _ASSERTE(pModule != NULL);
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DMT::AM: DebuggerMod:0x%x Module:0x%x AD:0x%x\n",
+ pModule, pModule->GetRuntimeModule(), pModule->GetAppDomain()));
+
+ DebuggerModuleEntry * pEntry = (DebuggerModuleEntry *) Add(HASH(pModule->GetRuntimeModule()));
+ if (pEntry == NULL)
+ {
+ ThrowOutOfMemory();
+ }
+
+ pEntry->module = pModule;
+
+ // Don't need to update the primary module since it was set when we created the module.
+ _ASSERTE(pModule->GetPrimaryModule() != NULL);
+}
+
+//-----------------------------------------------------------------------------
+// Remove a DebuggerModule from the module table.
+// This occurs in response to AppDomain unload.
+// Note that this doesn't necessarily mean the EE Module is being unloaded (it may be shared)
+//-----------------------------------------------------------------------------
+void DebuggerModuleTable::RemoveModule(Module* module, AppDomain *pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(module != NULL);
+ _ASSERTE(ThreadHoldsLock());
+
+ DebuggerModule * pDeletedModule = NULL;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DMT::RM: mod:0x%x AD:0x%x neutral:0x%x\n",
+ module, pAppDomain, module->GetAssembly()->IsDomainNeutral() ));
+
+ // If this is a domain neutral module, then scan the complete list of DebuggerModules looking
+ // for the one with a matching appdomain id.
+ // Note: we have to make sure to look up the module with the app domain parameter if the module lives in a shared
+ // assembly or the system assembly. <BUGNUM>Bugs 65943 & 81728.</BUGNUM>
+ _ASSERTE( SystemDomain::SystemAssembly()->IsDomainNeutral() );
+ if (module->GetAssembly()->IsDomainNeutral())
+ {
+ // This module is being unloaded from a specific AppDomain, but may still exist in other AppDomains
+
+ HASHFIND findmodule;
+ DebuggerModuleEntry *moduleentry;
+
+ for (moduleentry = (DebuggerModuleEntry*) FindFirstEntry(&findmodule);
+ moduleentry != NULL;
+ moduleentry = (DebuggerModuleEntry*) FindNextEntry(&findmodule))
+ {
+ DebuggerModule *pModule = moduleentry->module;
+
+ if ((pModule->GetRuntimeModule() == module) &&
+ (pModule->GetAppDomain() == pAppDomain))
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DMT::RM: found 0x%x (DM:0x%x)\n",
+ moduleentry, moduleentry->module));
+
+ pDeletedModule = pModule;
+
+ // Remove from table
+ Delete(HASH(module), (HASHENTRY *)moduleentry);
+
+ break;
+ }
+ }
+ // we should always find the module!!
+ _ASSERTE (moduleentry != NULL);
+ }
+ else
+ {
+ // This module is not shared among multiple AppDomains
+
+ DebuggerModuleEntry *entry
+ = (DebuggerModuleEntry *) Find(HASH(module), KEY(module));
+
+ _ASSERTE(entry != NULL); // it had better be in there!
+
+ if (entry != NULL) // if it's not, we fail gracefully in a free build
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DMT::RM: found 0x%x (DM:0x%x)\n",
+ entry, entry->module));
+
+ pDeletedModule = entry->module;
+
+ // Remove from table
+ Delete(HASH(module), (HASHENTRY *)entry);
+
+ // There should not be any other entry in the table for the same module
+ _ASSERTE( Find(HASH(module), KEY(module)) == NULL );
+ }
+ }
+
+ _ASSERTE(pDeletedModule != NULL);
+
+ // Update the primary module pointers. If any other module had this as a
+ // primary module, then we have to update that pointer (since we can't
+ // have our primary module be deleted!)
+ {
+ HASHFIND findmodule;
+ DebuggerModuleEntry *moduleentry;
+
+ DebuggerModule * pNewPrimary = NULL;
+
+ for (moduleentry = (DebuggerModuleEntry*) FindFirstEntry(&findmodule);
+ moduleentry != NULL;
+ moduleentry = (DebuggerModuleEntry*) FindNextEntry(&findmodule))
+ {
+ DebuggerModule *pOther = moduleentry->module;
+ _ASSERTE(pOther != NULL);
+ _ASSERTE(pOther != pDeletedModule);
+
+ // If pOther's primary was just deleted, then update it.
+ if (pOther->GetPrimaryModule() == pDeletedModule)
+ {
+ if (pNewPrimary == NULL)
+ {
+ pNewPrimary = pOther;
+ LOG((LF_CORDB, LL_INFO1000, "DMT::RM changed primary module from 0x%p to 0x%p\n", pDeletedModule, pNewPrimary));
+ }
+ pOther->SetPrimaryModule(pNewPrimary);
+ }
+ } // end for
+ }
+
+ DeleteInteropSafe(pDeletedModule);
+}
+
+
+#endif // DACCESS_COMPILE
+
+DebuggerModule *DebuggerModuleTable::GetModule(Module* module)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(module != NULL);
+ _ASSERTE(ThreadHoldsLock());
+
+ DebuggerModuleEntry *entry
+ = (DebuggerModuleEntry *) Find(HASH(module), KEY(module));
+ if (entry == NULL)
+ return NULL;
+ else
+ return entry->module;
+}
+
+// We should never look for a NULL Module *
+DebuggerModule *DebuggerModuleTable::GetModule(Module* module, AppDomain* pAppDomain)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(module != NULL);
+ _ASSERTE(ThreadHoldsLock());
+
+
+ HASHFIND findmodule;
+ DebuggerModuleEntry *moduleentry;
+
+ for (moduleentry = (DebuggerModuleEntry*) FindFirstEntry(&findmodule);
+ moduleentry != NULL;
+ moduleentry = (DebuggerModuleEntry*) FindNextEntry(&findmodule))
+ {
+ DebuggerModule *pModule = moduleentry->module;
+
+ if ((pModule->GetRuntimeModule() == module) &&
+ (pModule->GetAppDomain() == pAppDomain))
+ return pModule;
+ }
+
+ // Didn't find any match for this AppDomain; return NULL.
+ return NULL;
+}
+
+DebuggerModule *DebuggerModuleTable::GetFirstModule(HASHFIND *info)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ThreadHoldsLock());
+
+ DebuggerModuleEntry *entry = (DebuggerModuleEntry *) FindFirstEntry(info);
+ if (entry == NULL)
+ return NULL;
+ else
+ return entry->module;
+}
+
+DebuggerModule *DebuggerModuleTable::GetNextModule(HASHFIND *info)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(ThreadHoldsLock());
+
+ DebuggerModuleEntry *entry = (DebuggerModuleEntry *) FindNextEntry(info);
+ if (entry == NULL)
+ return NULL;
+ else
+ return entry->module;
+}
+
+
diff --git a/src/debug/ee/frameinfo.cpp b/src/debug/ee/frameinfo.cpp
new file mode 100644
index 0000000000..810682d138
--- /dev/null
+++ b/src/debug/ee/frameinfo.cpp
@@ -0,0 +1,2216 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: frameinfo.cpp
+//
+
+//
+// Code to find control info about a stack frame.
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+
+// Include so we can get information out of ComMethodFrame
+#ifdef FEATURE_COMINTEROP
+#include "COMToClrCall.h"
+#endif
+
+// Get a frame pointer from a RegDisplay.
+// This is mostly used for chains and stub frames (i.e. internal frames), where we don't need an exact
+// frame pointer. This is why it is okay to use the current SP instead of the caller SP on IA64.
+// We should really rename this and possibly roll it into GetFramePointer() when we move the stackwalker
+// to OOP.
+FramePointer GetSP(REGDISPLAY * pRDSrc)
+{
+ FramePointer fp = FramePointer::MakeFramePointer(
+ (LPVOID)GetRegdisplaySP(pRDSrc));
+
+ return fp;
+}
+
+// Get a frame pointer from a RegDisplay.
+FramePointer GetFramePointer(REGDISPLAY * pRDSrc)
+{
+ return FramePointer::MakeFramePointer(GetRegdisplaySP(pRDSrc));
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Convert a FramePointer to a StackFrame and return it.
+//
+// Arguments:
+// fp - the FramePointer to be converted
+//
+// Return Value:
+// a StackFrame equivalent to the given FramePointer
+//
+// Notes:
+// We really should consolidate the two abstractions for "stack frame identifiers"
+// (StackFrame and FramePointer) when we move the debugger stackwalker to OOP.
+//
+
+FORCEINLINE StackFrame ConvertFPToStackFrame(FramePointer fp)
+{
+ return StackFrame((UINT_PTR)fp.GetSPValue());
+}
+
+/* ------------------------------------------------------------------------- *
+ * DebuggerFrameInfo routines
+ * ------------------------------------------------------------------------- */
+
+// struct DebuggerFrameData:  Contains info used by the DebuggerWalkStackProc
+// to do a stack walk.  The info and pData fields are handed to the pCallback
+// routine at each frame.
+struct DebuggerFrameData
+{
+ // Initialize this struct. Only done at the start of a stackwalk.
+ void Init(
+ Thread * _pThread,
+ FramePointer _targetFP,
+ BOOL fIgnoreNonmethodFrames, // generally true for stackwalking and false for stepping
+ DebuggerStackCallback _pCallback,
+ void *_pData
+ )
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ this->pCallback = _pCallback;
+ this->pData = _pData;
+
+ this->cRealCounter = 0;
+
+ this->thread = _pThread;
+ this->targetFP = _targetFP;
+ this->targetFound = (_targetFP == LEAF_MOST_FRAME);
+
+ this->ignoreNonmethodFrames = fIgnoreNonmethodFrames;
+
+ // For now, we can tie these two flags together.
+ // In Everett, we disable SIS (for backwards compat).
+ this->fProvideInternalFrames = (fIgnoreNonmethodFrames != 0);
+
+ this->fNeedToSendEnterManagedChain = false;
+ this->fTrackingUMChain = false;
+ this->fHitExitFrame = false;
+
+ this->info.eStubFrameType = STUBFRAME_NONE;
+ this->info.quickUnwind = false;
+
+ this->info.frame = NULL;
+ this->needParentInfo = false;
+
+#ifdef WIN64EXCEPTIONS
+ this->fpParent = LEAF_MOST_FRAME;
+ this->info.fIsLeaf = true;
+ this->info.fIsFunclet = false;
+ this->info.fIsFilter = false;
+#endif // WIN64EXCEPTIONS
+
+ // Look strange? Go to definition of this field. I dare you.
+ this->info.fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric = false;
+
+#if defined(_DEBUG)
+ this->previousFP = LEAF_MOST_FRAME;
+#endif // _DEBUG
+ }
+
+ // True if we need the next CrawlFrame to fill out part of this FrameInfo's data.
+ bool needParentInfo;
+
+ // The FrameInfo that we'll dispatch to the pCallback. This matches against
+ // the CrawlFrame for the frame that the callback belongs to.
+ FrameInfo info;
+
+ // Regdisplay that the EE stackwalker is updating.
+ REGDISPLAY regDisplay;
+
+
+#ifdef WIN64EXCEPTIONS
+ // This is used to skip funclets in a stackwalk. It marks the frame pointer to which we should skip.
+ FramePointer fpParent;
+#endif // WIN64EXCEPTIONS
+#if defined(_DEBUG)
+ // For debugging, track the previous FramePointer so we can assert that we're
+ // making progress through the stack.
+ FramePointer previousFP;
+#endif // _DEBUG
+
+ // whether we have hit an exit frame or not (i.e. a M2U frame)
+ bool fHitExitFrame;
+
+private:
+ // The scope of this field is each section of managed method frames on the stack.
+ bool fNeedToSendEnterManagedChain;
+
+ // Flag set when we first stack-walk to decide if we want to ignore certain frames.
+ // Stepping doesn't ignore these frames; end user stacktraces do.
+ BOOL ignoreNonmethodFrames;
+
+ // Do we want callbacks for internal frames?
+ // Steppers generally don't. User stack-walk does.
+ bool fProvideInternalFrames;
+
+ // Info for tracking unmanaged chains.
+ // We track the starting (leaf) context for an unmanaged chain, as well as the
+ // ending (root) framepointer.
+ bool fTrackingUMChain;
+ REGDISPLAY rdUMChainStart;
+ FramePointer fpUMChainEnd;
+
+ // Thread that the stackwalk is for.
+ Thread *thread;
+
+
+ // Target FP indicates at what point in the stackwalk we'll start dispatching callbacks.
+ // Naturally, if this is LEAF_MOST_FRAME, then all callbacks will be dispatched
+ FramePointer targetFP;
+ bool targetFound;
+
+ // Count # of callbacks we could have dispatched (assuming targetFP==LEAF_MOST_FRAME).
+ // Useful for detecting leaf.
+ int cRealCounter;
+
+ // Callback & user-data supplied to that callback.
+ DebuggerStackCallback pCallback;
+ void *pData;
+
+ private:
+
+ // Raw invoke. This just does some consistency asserts,
+ // and invokes the callback if we're in the requested target range.
+ StackWalkAction RawInvokeCallback(FrameInfo * pInfo)
+ {
+#ifdef _DEBUG
+ _ASSERTE(pInfo != NULL);
+ MethodDesc * md = pInfo->md;
+ // Invoke the callback to the user. Log what we're invoking.
+ LOG((LF_CORDB, LL_INFO10000, "DSWCallback: MD=%s,0x%p, Chain=%x, Stub=%x, Frame=0x%p, Internal=%d\n",
+ ((md == NULL) ? "None" : md->m_pszDebugMethodName), md,
+ pInfo->chainReason,
+ pInfo->eStubFrameType,
+ pInfo->frame, pInfo->internal));
+
+ // Make sure we're providing a valid FrameInfo for the callback.
+ pInfo->AssertValid();
+#endif
+ // Update counter. This provides a convenient check for leaf FrameInfo.
+ this->cRealCounter++;
+
+
+ // Only invoke if we're past the target.
+ if (!this->targetFound && IsEqualOrCloserToLeaf(this->targetFP, this->info.fp))
+ {
+ this->targetFound = true;
+ }
+
+ if (this->targetFound)
+ {
+ return (pCallback)(pInfo, pData);
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "Not invoking yet.\n"));
+ }
+
+ return SWA_CONTINUE;
+ }
+
+public:
+ // Invoke a callback. This may do extra logic to preserve the interface between
+ // the LS stackwalker and the LS:
+ // - don't invoke if we're not at the target yet
+ // - send EnterManagedChains if we need it.
+ StackWalkAction InvokeCallback(FrameInfo * pInfo)
+ {
+ // Track if we've sent any managed code yet.
+ // If we haven't, then don't send the enter-managed chain. This catches cases
+ // where we have a leaf-most unmanaged chain.
+ if ((pInfo->frame == NULL) && (pInfo->md != NULL))
+ {
+ this->fNeedToSendEnterManagedChain = true;
+ }
+
+
+ // Do tracking to decide if we need to send an Enter-Managed chain.
+ if (pInfo->HasChainMarker())
+ {
+ if (pInfo->managed)
+ {
+ // If we're dispatching a managed-chain, then we don't need to send another one.
+ fNeedToSendEnterManagedChain = false;
+ }
+ else
+ {
+ // If we're dispatching an UM chain, then send the Managed one.
+ // Note that the only unmanaged chains are ThreadStart chains and UM chains.
+ if (fNeedToSendEnterManagedChain)
+ {
+ fNeedToSendEnterManagedChain = false;
+
+ FrameInfo f;
+
+ // Assume entry chain's FP is one pointer-width after the upcoming UM chain.
+ FramePointer fpRoot = FramePointer::MakeFramePointer(
+ (BYTE*) GetRegdisplaySP(&pInfo->registers) - sizeof(DWORD*));
+
+ f.InitForEnterManagedChain(fpRoot);
+ if (RawInvokeCallback(&f) == SWA_ABORT)
+ {
+ return SWA_ABORT;
+ }
+ }
+ }
+ }
+
+ return RawInvokeCallback(pInfo);
+ }
+
+ // Note that we should start tracking an Unmanaged Chain.
+ void BeginTrackingUMChain(FramePointer fpRoot, REGDISPLAY * pRDSrc)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!this->fTrackingUMChain);
+
+ CopyREGDISPLAY(&this->rdUMChainStart, pRDSrc);
+
+ this->fTrackingUMChain = true;
+ this->fpUMChainEnd = fpRoot;
+ this->fHitExitFrame = false;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "UM Chain starting at Frame=0x%p\n", this->fpUMChainEnd.GetSPValue()));
+
+ // This UM chain may get cancelled later, so don't even worry about toggling the fNeedToSendEnterManagedChain bit here.
+ // Invoke() will track whether to send an Enter-Managed chain or not.
+ }
+
+ // For various heuristics, we may not want to send an UM chain.
+ void CancelUMChain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(this->fTrackingUMChain);
+ this->fTrackingUMChain = false;
+ }
+
+ // True iff we're currently tracking an unmanaged chain.
+ bool IsTrackingUMChain()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return this->fTrackingUMChain;
+ }
+
+
+
+ // Get/Set Regdisplay that starts an Unmanaged chain.
+ REGDISPLAY * GetUMChainStartRD()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(fTrackingUMChain);
+ return &rdUMChainStart;
+ }
+
+ // Get/Set FramePointer that ends an unmanaged chain.
+ void SetUMChainEnd(FramePointer fp)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(fTrackingUMChain);
+ fpUMChainEnd = fp;
+ }
+
+ FramePointer GetUMChainEnd()
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(fTrackingUMChain);
+ return fpUMChainEnd;
+ }
+
+ // Get thread we're currently tracing.
+ Thread * GetThread()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return thread;
+ }
+
+ // Returns true if we're on the leaf-callback (i.e., we haven't dispatched a callback yet).
+ bool IsLeafCallback()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return cRealCounter == 0;
+ }
+
+ bool ShouldProvideInternalFrames()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return fProvideInternalFrames;
+ }
+ bool ShouldIgnoreNonmethodFrames()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return ignoreNonmethodFrames != 0;
+ }
+};
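+
+// A minimal usage sketch (the callback name here is hypothetical; the real driver is
+// DebuggerWalkStackProc below). DebuggerWalkStackProc fills in DebuggerFrameData::info
+// one frame at a time, and InvokeCallback() forwards each completed FrameInfo to the
+// user-supplied DebuggerStackCallback once the target FramePointer has been reached:
+//
+//   StackWalkAction MyStackCallback(FrameInfo * pInfo, void * pData)
+//   {
+//       // Inspect pInfo->md, pInfo->fp, pInfo->chainReason, etc. here.
+//       return SWA_CONTINUE;   // or SWA_ABORT to stop the walk early
+//   }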
+
+
+//---------------------------------------------------------------------------------------
+//
+// On IA64, the offset given by the OS during stackwalking is actually the offset at the call instruction.
+// This is different from x86 and X64, where the offset is immediately after the call instruction. In order
+// to have a uniform behaviour, we need to adjust the relative offset on IA64. On ARM we instead mask off the
+// Thumb bit. This function is a nop on other platforms.
+//
+// Arguments:
+// pCF - the CrawlFrame for the current method frame
+// pInfo - This is the FrameInfo for the current method frame. We need to use the fIsLeaf field,
+// since no adjustment is necessary for leaf frames.
+//
+// Return Value:
+// returns the adjusted relative offset
+//
+
+inline ULONG AdjustRelOffset(CrawlFrame *pCF,
+ FrameInfo *pInfo)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pCF != NULL);
+ }
+ CONTRACTL_END;
+
+#if defined(_TARGET_ARM_)
+ return pCF->GetRelOffset() & ~THUMB_CODE;
+#else
+ return pCF->GetRelOffset();
+#endif
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Even when there is an exit frame in the explicit frame chain, it does not necessarily mean that we have
+// actually called out to unmanaged code yet or that we actually have a managed call site. Given an exit
+// frame, this function determines if we have a managed call site and have already called out to unmanaged
+// code. If we have, then we return the caller SP as the potential frame pointer. Otherwise we return
+// LEAF_MOST_FRAME.
+//
+// Arguments:
+// pFrame - the exit frame to be checked
+// pData - the state of the current frame maintained by the debugger stackwalker
+// pPotentialFP - This is an out parameter. It returns the caller SP of the last managed caller if
+// there is a managed call site and we have already called out to unmanaged code.
+// Otherwise, LEAF_MOST_FRAME is returned.
+//
+// Return Value:
+// true - we have a managed call site and we have called out to unmanaged code
+// false - otherwise
+//
+
+bool HasExitRuntime(Frame *pFrame, DebuggerFrameData *pData, FramePointer *pPotentialFP)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER; // Callers demand this function be GC_NOTRIGGER.
+ MODE_ANY;
+ PRECONDITION(pFrame->GetFrameType() == Frame::TYPE_EXIT);
+ }
+ CONTRACTL_END;
+
+#ifdef _TARGET_X86_
+ TADDR returnIP, returnSP;
+
+ EX_TRY
+ {
+ // This is a real issue. This may be called while holding GC-forbid locks, and so
+ // this function can't trigger a GC. However, the only impl we have calls GC-trigger functions.
+ CONTRACT_VIOLATION(GCViolation);
+ pFrame->GetUnmanagedCallSite(NULL, &returnIP, &returnSP);
+ }
+ EX_CATCH
+ {
+ // We never expect an actual exception here (except maybe under OOM).
+ // If we get an exception, then simulate the default behavior for GetUnmanagedCallSite.
+ returnIP = NULL;
+ returnSP = NULL; // this will cause us to return true.
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ LOG((LF_CORDB, LL_INFO100000,
+ "DWSP: TYPE_EXIT: returnIP=0x%08x, returnSP=0x%08x, frame=0x%08x, threadFrame=0x%08x, regSP=0x%08x\n",
+ returnIP, returnSP, pFrame, pData->GetThread()->GetFrame(), GetRegdisplaySP(&pData->regDisplay)));
+
+ if (pPotentialFP != NULL)
+ {
+ *pPotentialFP = FramePointer::MakeFramePointer((void*)returnSP);
+ }
+
+ return ((pFrame != pData->GetThread()->GetFrame()) ||
+ (returnSP == NULL) ||
+ ((TADDR)GetRegdisplaySP(&pData->regDisplay) <= returnSP));
+
+#else // _TARGET_X86_
+ // DebuggerExitFrame always returns a NULL returnSP on x86.
+ if (pFrame->GetVTablePtr() == DebuggerExitFrame::GetMethodFrameVPtr())
+ {
+ if (pPotentialFP != NULL)
+ {
+ *pPotentialFP = LEAF_MOST_FRAME;
+ }
+ return true;
+ }
+ else if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())
+ {
+ InlinedCallFrame *pInlinedFrame = static_cast<InlinedCallFrame *>(pFrame);
+ LPVOID sp = (LPVOID)pInlinedFrame->GetCallSiteSP();
+
+ // The sp returned below is the sp of the caller, which is either an IL stub in the normal case
+ // or a normal managed method in the inlined pinvoke case.
+ // This sp may be the same as the frame's address, so we need to use the largest
+ // possible bsp value to make sure that this frame pointer is closer to the root than
+ // the frame pointer made from the frame address itself.
+ if (pPotentialFP != NULL)
+ {
+ *pPotentialFP = FramePointer::MakeFramePointer( (LPVOID)sp );
+ }
+
+ return ((pFrame != pData->GetThread()->GetFrame()) ||
+ InlinedCallFrame::FrameHasActiveCall(pInlinedFrame));
+
+ }
+ else
+ {
+ // It would be nice if there were a way to assert that the current frame is indeed of a
+ // class derived from TransitionFrame.
+ TransitionFrame *pTransFrame = static_cast<TransitionFrame*>(pFrame);
+ LPVOID sp = (LPVOID)pTransFrame->GetSP();
+
+ // The sp returned below is the sp of the caller, which is either an IL stub in the normal case
+ // or a normal managed method in the inlined pinvoke case.
+ // This sp may be the same as the frame's address, so we need to use the largest
+ // possible bsp value to make sure that this frame pointer is closer to the root than
+ // the frame pointer made from the frame address itself.
+ if (pPotentialFP != NULL)
+ {
+ *pPotentialFP = FramePointer::MakeFramePointer( (LPVOID)sp );
+ }
+
+ return true;
+ }
+#endif // _TARGET_X86_
+}
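+
+// Illustrative caller pattern (a sketch only; the real call sites are in TrackUMChain and
+// DebuggerWalkStackProc below):
+//
+//   FramePointer potentialFP;
+//   if (HasExitRuntime(pFrame, pData, &potentialFP))
+//   {
+//       // There is a managed call site and we have really called out to unmanaged
+//       // code; potentialFP holds the caller SP (or LEAF_MOST_FRAME if unknown).
+//   }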
+
+#ifdef _DEBUG
+
+//-----------------------------------------------------------------------------
+// Debug helpers to get name of Frame.
+//-----------------------------------------------------------------------------
+LPCUTF8 FrameInfo::DbgGetClassName()
+{
+ return (md == NULL) ? ("None") : (md->m_pszDebugClassName);
+}
+LPCUTF8 FrameInfo::DbgGetMethodName()
+{
+ return (md == NULL) ? ("None") : (md->m_pszDebugMethodName);
+}
+
+
+//-----------------------------------------------------------------------------
+// Debug helper that asserts invariants about a FrameInfo before we dispatch it.
+//-----------------------------------------------------------------------------
+void FrameInfo::AssertValid()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ bool fMethod = this->HasMethodFrame();
+ bool fStub = this->HasStubFrame();
+ bool fChain = this->HasChainMarker();
+
+ // Can't be both Stub & Chain
+ _ASSERTE(!fStub || !fChain);
+
+ // Must be at least a Method, Stub or Chain or Internal
+ _ASSERTE(fMethod || fStub || fChain || this->internal);
+
+ // Check Managed status is consistent
+ if (fMethod)
+ {
+ _ASSERTE(this->managed); // We only report managed methods
+ }
+ if (fChain)
+ {
+ if (!managed)
+ {
+ // Only certain chains can be unmanaged
+ _ASSERTE((this->chainReason == CHAIN_THREAD_START) ||
+ (this->chainReason == CHAIN_ENTER_UNMANAGED));
+ }
+ else
+ {
+ // UM chains can never be managed.
+ _ASSERTE((this->chainReason != CHAIN_ENTER_UNMANAGED));
+ }
+
+ }
+
+ // FramePointer should be valid
+ _ASSERTE(this->fp != LEAF_MOST_FRAME);
+ _ASSERTE((this->fp != ROOT_MOST_FRAME) || (chainReason== CHAIN_THREAD_START) || (chainReason == CHAIN_ENTER_UNMANAGED));
+
+ // If we have a Method, then we need an AppDomain.
+ // (RS will need it to do lookup)
+ if (fMethod)
+ {
+ _ASSERTE(currentAppDomain != NULL);
+ _ASSERTE(managed);
+ // Stubs may have a method w/o any code (eg, PInvoke wrapper).
+ // @todo - Frame::TYPE_TP_METHOD_FRAME breaks this assert. Are there other cases too?
+ //_ASSERTE(fStub || (pIJM != NULL));
+ }
+
+ if (fStub)
+ {
+ // All stubs (except LightWeightFunctions) match up w/a Frame.
+ _ASSERTE(this->frame || (eStubFrameType == STUBFRAME_LIGHTWEIGHT_FUNCTION));
+ }
+}
+#endif
+
+//-----------------------------------------------------------------------------
+// Get the DJI associated w/ this frame. This is a convenience function.
+// This is recommended over using MethodDescs because DJI's are version-aware.
+//-----------------------------------------------------------------------------
+DebuggerJitInfo * FrameInfo::GetJitInfoFromFrame()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // Not all FrameInfo objects correspond to actual code.
+ if (HasChainMarker() || HasStubFrame() || (frame != NULL))
+ {
+ return NULL;
+ }
+
+ DebuggerJitInfo *ji = NULL;
+
+ // @todo - we shouldn't need both a MD and an IP here.
+ EX_TRY
+ {
+ _ASSERTE(this->md != NULL);
+ ji = g_pDebugger->GetJitInfo(this->md, (const BYTE*)GetControlPC(&(this->registers)));
+ _ASSERTE(ji != NULL);
+ _ASSERTE(ji->m_fd == this->md);
+ }
+ EX_CATCH
+ {
+ ji = NULL;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ return ji;
+}
+
+//-----------------------------------------------------------------------------
+// Get the DMI associated w/ this frame. This is a convenience function.
+// DMIs are 1:1 with the (token, module) pair.
+//-----------------------------------------------------------------------------
+DebuggerMethodInfo * FrameInfo::GetMethodInfoFromFrameOrThrow()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ MethodDesc * pDesc = this->md;
+ mdMethodDef token = pDesc->GetMemberDef();
+ Module * pRuntimeModule = pDesc->GetModule();
+
+ DebuggerMethodInfo *dmi = g_pDebugger->GetOrCreateMethodInfo(pRuntimeModule, token);
+ return dmi;
+}
+
+
+//-----------------------------------------------------------------------------
+// Init a FrameInfo for a UM chain.
+// We need a stackrange to give to an unmanaged debugger.
+// pRDSrc->Esp will provide the start (leaf) marker.
+// fpRoot will provide the end (root) portion.
+//-----------------------------------------------------------------------------
+void FrameInfo::InitForUMChain(FramePointer fpRoot, REGDISPLAY * pRDSrc)
+{
+ _ASSERTE(pRDSrc != NULL);
+
+ // Mark that we're an UM Chain (and nothing else).
+ this->frame = NULL;
+ this->md = NULL;
+
+ // Fp will be the end (root) of the stack range.
+ // pRDSrc->Sp will be the start (leaf) of the stack range.
+ CopyREGDISPLAY(&(this->registers), pRDSrc);
+ this->fp = fpRoot;
+
+ this->quickUnwind = false;
+ this->internal = false;
+ this->managed = false;
+
+ // These parts of the FrameInfo can be ignored for a UM chain.
+ this->relOffset = 0;
+ this->pIJM = NULL;
+ this->MethodToken = METHODTOKEN(NULL, 0);
+ this->currentAppDomain = NULL;
+ this->exactGenericArgsToken = NULL;
+
+ InitForScratchFrameInfo();
+
+ this->chainReason = CHAIN_ENTER_UNMANAGED;
+ this->eStubFrameType = STUBFRAME_NONE;
+
+#ifdef _DEBUG
+ FramePointer fpLeaf = GetSP(pRDSrc);
+ _ASSERTE(IsCloserToLeaf(fpLeaf, fpRoot));
+#endif
+
+#ifdef _DEBUG
+ // After we just init it, it had better be valid.
+ this->AssertValid();
+#endif
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// This is just a small helper to initialize the fields which are specific to 64-bit. Note that you should
+// only call this function on a scratch FrameInfo. Never call it on the FrameInfo used by the debugger
+// stackwalker to store information on the current frame.
+//
+
+void FrameInfo::InitForScratchFrameInfo()
+{
+#ifdef WIN64EXCEPTIONS
+ // The following flags cannot be trashed when we are calling this function on the current FrameInfo
+ // (the one we keep track of across multiple stackwalker callbacks). Thus, make sure you do not call
+ // this function from InitForDynamicMethod(). In all other cases, we can call this method after we
+ // call InitFromStubHelper() because we are working on a local scratch variable.
+ this->fIsLeaf = false;
+ this->fIsFunclet = false;
+ this->fIsFilter = false;
+#endif // WIN64EXCEPTIONS
+}
+
+
+//-----------------------------------------------------------------------------
+//
+// Init a FrameInfo for a stub. Stub frames map to internal frames on the RS. Stubs which we care about
+// usually contain an explicit frame which translates to an internal frame on the RS. Dynamic method is
+// the sole exception.
+//
+// Arguments:
+// pCF - the CrawlFrame containing the state of the current frame
+// pMDHint - some stubs have associated MethodDesc but others don't,
+// which is why this argument can be NULL
+// type - the type of the stub/internal frame
+//
+
+void FrameInfo::InitFromStubHelper(
+ CrawlFrame * pCF,
+ MethodDesc * pMDHint, // NULL ok
+ CorDebugInternalFrameType type
+)
+{
+ _ASSERTE(pCF != NULL);
+
+ Frame * pFrame = pCF->GetFrame();
+
+ LOG((LF_CORDB, LL_EVERYTHING, "InitFromStubHelper. Frame=0x%p, type=%d\n", pFrame, type));
+
+ // All Stubs have a Frame except for LightWeight methods
+ _ASSERTE((type == STUBFRAME_LIGHTWEIGHT_FUNCTION) || (pFrame != NULL));
+ REGDISPLAY *pRDSrc = pCF->GetRegisterSet();
+
+ this->frame = pFrame;
+
+ // Stub frames may be associated w/ a Method (as a hint). However this method
+ // will never have a JitManager b/c it will never have IL (if it had IL, we'd be a
+ // regular frame, not a stub frame)
+ this->md = pMDHint;
+
+ CopyREGDISPLAY(&this->registers, pRDSrc);
+
+ // FramePointer must match up w/ an EE Frame b/c that's how we match
+ // up Exception callbacks.
+ if (pFrame != NULL)
+ {
+ this->fp = FramePointer::MakeFramePointer(
+ (LPVOID) pFrame);
+ }
+ else
+ {
+ this->fp = GetSP(pRDSrc);
+ }
+
+ this->quickUnwind = false;
+ this->internal = false;
+ this->managed = true;
+ this->relOffset = 0;
+ this->ambientSP = NULL;
+
+
+ // Method associated w/a stub will never have a JitManager.
+ this->pIJM = NULL;
+ this->MethodToken = METHODTOKEN(NULL, 0);
+ this->currentAppDomain = pCF->GetAppDomain();
+ this->exactGenericArgsToken = NULL;
+
+ // Stub frames are mutually exclusive with chain markers.
+ this->chainReason = CHAIN_NONE;
+ this->eStubFrameType = type;
+
+#ifdef _DEBUG
+ // After we just init it, it had better be valid.
+ this->AssertValid();
+#endif
+}
+
+//-----------------------------------------------------------------------------
+// Initialize a FrameInfo to be used for an "InternalFrame"
+// Frame should be a derived class of FramedMethodFrame.
+// FrameInfo's MethodDesc will be for managed wrapper for native call.
+//-----------------------------------------------------------------------------
+void FrameInfo::InitForM2UInternalFrame(CrawlFrame * pCF)
+{
+ // For a M2U call, there's a managed method wrapping the unmanaged call. Use that.
+ Frame * pFrame = pCF->GetFrame();
+ _ASSERTE(pFrame->GetTransitionType() == Frame::TT_M2U);
+ FramedMethodFrame * pM2U = static_cast<FramedMethodFrame*> (pFrame);
+ MethodDesc * pMDWrapper = pM2U->GetFunction();
+
+ // Some M2U transitions may not have a function associated w/ them,
+ // so pMDWrapper may be NULL. PInvokeCalliFrame is an example.
+
+ InitFromStubHelper(pCF, pMDWrapper, STUBFRAME_M2U);
+ InitForScratchFrameInfo();
+}
+
+//-----------------------------------------------------------------------------
+// Initialize for the U2M case...
+//-----------------------------------------------------------------------------
+void FrameInfo::InitForU2MInternalFrame(CrawlFrame * pCF)
+{
+ PREFIX_ASSUME(pCF != NULL);
+ MethodDesc * pMDHint = NULL;
+
+#ifdef FEATURE_COMINTEROP
+ Frame * pFrame = pCF->GetFrame();
+ PREFIX_ASSUME(pFrame != NULL);
+
+
+ // For regular U2M PInvoke cases, we don't care about MD b/c it's just going to
+ // be the next frame.
+ // If we're a COM2CLR call, perhaps we can get the MD for the interface.
+ if (pFrame->GetVTablePtr() == ComMethodFrame::GetMethodFrameVPtr())
+ {
+ ComMethodFrame* pCOMFrame = static_cast<ComMethodFrame*> (pFrame);
+ ComCallMethodDesc* pCMD = reinterpret_cast<ComCallMethodDesc *> (pCOMFrame->ComMethodFrame::GetDatum());
+ pMDHint = pCMD->GetInterfaceMethodDesc();
+
+ // Some COM-interop cases don't have an intermediate interface method desc, so
+ // pMDHint may be null.
+ }
+#endif
+
+ InitFromStubHelper(pCF, pMDHint, STUBFRAME_U2M);
+ InitForScratchFrameInfo();
+}
+
+//-----------------------------------------------------------------------------
+// Init for an AD transition
+//-----------------------------------------------------------------------------
+void FrameInfo::InitForADTransition(CrawlFrame * pCF)
+{
+ Frame * pFrame;
+ pFrame = pCF->GetFrame();
+ _ASSERTE(pFrame->GetTransitionType() == Frame::TT_AppDomain);
+ MethodDesc * pMDWrapper = NULL;
+
+ InitFromStubHelper(pCF, pMDWrapper, STUBFRAME_APPDOMAIN_TRANSITION);
+ InitForScratchFrameInfo();
+}
+
+
+//-----------------------------------------------------------------------------
+// Init frame for a dynamic method.
+//-----------------------------------------------------------------------------
+void FrameInfo::InitForDynamicMethod(CrawlFrame * pCF)
+{
+ // These are just stack markers that there's a dynamic method on the callstack.
+ InitFromStubHelper(pCF, NULL, STUBFRAME_LIGHTWEIGHT_FUNCTION);
+ // Do not call InitForScratchFrameInfo() here! Please refer to the comment in that function.
+}
+
+//-----------------------------------------------------------------------------
+// Init an internal frame to mark a func-eval.
+//-----------------------------------------------------------------------------
+void FrameInfo::InitForFuncEval(CrawlFrame * pCF)
+{
+ // We don't store a MethodDesc hint referring to the method we're going to invoke because
+ // uses of stub frames will assume the MD is relative to the AppDomain the frame is in.
+ // For cross-AD funcevals, we're invoking a method in a domain other than the one this frame
+ // is in.
+ MethodDesc * pMDHint = NULL;
+
+ // Add a stub frame here to mark that there is a FuncEvalFrame on the stack.
+ InitFromStubHelper(pCF, pMDHint, STUBFRAME_FUNC_EVAL);
+ InitForScratchFrameInfo();
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Initialize a FrameInfo for sending the CHAIN_THREAD_START reason.
+// The common case is that the chain is NOT managed, since the lowest (closest to the root) managed method
+// is usually called from unmanaged code. In fact, in Whidbey, we should never have a managed chain.
+//
+// Arguments:
+// pRDSrc - a REGDISPLAY for the beginning (the leafmost frame) of the chain
+//
+void FrameInfo::InitForThreadStart(Thread * pThread, REGDISPLAY * pRDSrc)
+{
+ this->frame = (Frame *) FRAME_TOP;
+ this->md = NULL;
+ CopyREGDISPLAY(&(this->registers), pRDSrc);
+ this->fp = FramePointer::MakeFramePointer(pThread->GetCachedStackBase());
+ this->quickUnwind = false;
+ this->internal = false;
+ this->managed = false;
+ this->relOffset = 0;
+ this->pIJM = NULL;
+ this->MethodToken = METHODTOKEN(NULL, 0);
+
+ this->currentAppDomain = NULL;
+ this->exactGenericArgsToken = NULL;
+
+ InitForScratchFrameInfo();
+
+ this->chainReason = CHAIN_THREAD_START;
+ this->eStubFrameType = STUBFRAME_NONE;
+
+#ifdef _DEBUG
+ // After we just init it, it had better be valid.
+ this->AssertValid();
+#endif
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Initialize a FrameInfo for sending a CHAIN_ENTER_MANAGED.
+// A Enter-Managed chain is always sent immediately before an UM chain, meaning that the Enter-Managed chain
+// is closer to the leaf than the UM chain.
+//
+// Arguments:
+// fpRoot - This is the frame pointer for the Enter-Managed chain. It is currently arbitrarily set
+// to be one stack slot higher (closer to the leaf) than the frame pointer of the beginning
+// of the upcoming UM chain.
+//
+
+void FrameInfo::InitForEnterManagedChain(FramePointer fpRoot)
+{
+ // Nobody should use an EnterManagedChain's Frame*, but there's no
+ // good value to enforce that.
+ this->frame = (Frame *) FRAME_TOP;
+ this->md = NULL;
+ memset((void *)&this->registers, 0, sizeof(this->registers));
+ this->fp = fpRoot;
+
+ this->quickUnwind = true;
+ this->internal = false;
+ this->managed = true;
+ this->relOffset = 0;
+ this->pIJM = NULL;
+ this->MethodToken = METHODTOKEN(NULL, 0);
+
+ this->currentAppDomain = NULL;
+ this->exactGenericArgsToken = NULL;
+
+ InitForScratchFrameInfo();
+
+ this->chainReason = CHAIN_ENTER_MANAGED;
+ this->eStubFrameType = STUBFRAME_NONE;
+}
+
+//-----------------------------------------------------------------------------
+// Do tracking for UM chains.
+// This may invoke the UMChain callback and M2U callback.
+//-----------------------------------------------------------------------------
+StackWalkAction TrackUMChain(CrawlFrame *pCF, DebuggerFrameData *d)
+{
+ Frame *frame = g_pEEInterface->GetFrame(pCF);
+
+ // If we encounter an ExitFrame out in the wild, then we'll convert it to an UM chain.
+ if (!d->IsTrackingUMChain())
+ {
+ if ((frame != NULL) && (frame != FRAME_TOP) && (frame->GetFrameType() == Frame::TYPE_EXIT))
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DWSP. ExitFrame while not tracking\n"));
+ REGDISPLAY* pRDSrc = pCF->GetRegisterSet();
+
+ d->BeginTrackingUMChain(GetSP(pRDSrc), pRDSrc);
+
+ // fall through and we'll send the UM chain.
+ }
+ else
+ {
+ return SWA_CONTINUE;
+ }
+ }
+
+ _ASSERTE(d->IsTrackingUMChain());
+
+
+ // If we're tracking an UM chain, then we need to:
+ // - possibly refine the start & end values as we get new information in the stacktrace.
+ // - possibly cancel the UM chain for various heuristics.
+ // - possibly dispatch if we've hit managed code again.
+
+ bool fDispatchUMChain = false;
+ // UM Chain stops when managed code starts again.
+ if (frame != NULL)
+ {
+ // If it's just a EE Frame, then update this as a possible end of stack range for the UM chain.
+ // (The end of a stack range is closer to the root.)
+ d->SetUMChainEnd(FramePointer::MakeFramePointer((LPVOID)(frame)));
+
+
+ Frame::ETransitionType t = frame->GetTransitionType();
+ int ft = frame->GetFrameType();
+
+
+ // Sometimes we may not want to show an UM chain b/c we know it's just
+ // code inside of mscorwks. (Eg: Funcevals & AD transitions both fall into this category).
+ // These are perfectly valid UM chains and we could give them if we wanted to.
+ if ((t == Frame::TT_AppDomain) || (ft == Frame::TYPE_FUNC_EVAL))
+ {
+ d->CancelUMChain();
+ return SWA_CONTINUE;
+ }
+
+ // If we hit an M2U frame, then go ahead and dispatch the UM chain now.
+ // This will likely also be an exit frame.
+ if (t == Frame::TT_M2U)
+ {
+ fDispatchUMChain = true;
+ }
+
+ // If we get an Exit frame, we can use that to "prune" the UM chain to a more friendly state.
+ // This heuristic is optional; it just eliminates lots of internal mscorwks frames from the callstack.
+ // Note that this heuristic is only useful if we get a callback on the entry frame
+ // (e.g. UMThkCallFrame) between the callback on the native marker and the callback on the exit frame.
+ // Otherwise the REGDISPLAY will be the same.
+ if (ft == Frame::TYPE_EXIT)
+ {
+ // If we have a valid reg-display (non-null IP) then update it.
+ // We may have an invalid reg-display if we have an exit frame on an inactive thread.
+ REGDISPLAY * pNewRD = pCF->GetRegisterSet();
+ if (GetControlPC(pNewRD) != NULL)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DWSP. updating RD while tracking UM chain\n"));
+ CopyREGDISPLAY(d->GetUMChainStartRD(), pNewRD);
+ }
+
+ FramePointer fpLeaf = GetSP(d->GetUMChainStartRD());
+ _ASSERTE(IsCloserToLeaf(fpLeaf, d->GetUMChainEnd()));
+
+
+ _ASSERTE(!d->fHitExitFrame); // should only have 1 exit frame per UM chain.
+ d->fHitExitFrame = true;
+
+ FramePointer potentialFP;
+
+ FramePointer fpNewChainEnd = d->GetUMChainEnd();
+
+ // Check to see if we are inside the unmanaged call. We want to make sure we only report an exit frame after
+ // we've really exited. There is a short period between when we set up the frame and when we actually exit
+ // the runtime. This check is intended to ensure we're actually outside now.
+ if (HasExitRuntime(frame, d, &potentialFP))
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "HasExitRuntime. potentialFP=0x%p\n", potentialFP.GetSPValue()));
+
+ // If we have no call site, manufacture a FP using the current frame.
+ // If we do have a call site, then the FP is actually going to be the caller SP,
+ // where the caller is the last managed method before calling out to unmanaged code.
+ if (potentialFP == LEAF_MOST_FRAME)
+ {
+ fpNewChainEnd = FramePointer::MakeFramePointer((LPVOID)((BYTE*)frame - sizeof(LPVOID)));
+ }
+ else
+ {
+ fpNewChainEnd = potentialFP;
+ }
+
+ }
+ // For IL stubs, we may actually push an uninitialized InlinedCallFrame frame onto the frame chain
+ // in jitted managed code, and then later on initialize it in a native runtime helper. In this case, if
+ // HasExitRuntime() is false (meaning the frame is uninitialized), then we are actually still in managed
+ // code and have not made the call to native code yet, so we should report an unmanaged chain.
+ else
+ {
+ d->CancelUMChain();
+ return SWA_CONTINUE;
+ }
+
+ fDispatchUMChain = true;
+
+ // If we got a valid chain end, then prune the UM chain accordingly.
+ // Note that some EE Frames will give invalid info back so we have to check.
+ // PInvokeCalliFrame is one example (when doing MC++ function pointers)
+ if (IsCloserToRoot(fpNewChainEnd, fpLeaf))
+ {
+ d->SetUMChainEnd(fpNewChainEnd);
+ }
+ else
+ {
+ _ASSERTE(IsCloserToLeaf(fpLeaf, d->GetUMChainEnd()));
+ }
+ } // end ExitFrame
+
+ // Only CLR internal code / stubs can push Frames onto the Frame chain.
+ // So if we hit a raw interceptor frame before we hit any managed frame, then this whole
+ // UM chain must still be in CLR internal code.
+ // Either way, this UM chain has ended (and some new chain based off the frame has started)
+ // so we need to either Cancel the chain or dispatch it.
+ if (frame->GetInterception() != Frame::INTERCEPTION_NONE)
+ {
+ // Interceptors may contain calls out to unmanaged code (such as unmanaged dllmain when
+ // loading a new dll), so we need to dispatch these.
+ // These extra UM chains don't show up in Everett, and so Everett debuggers on Whidbey
+ // may see new chains.
+ // We need to ensure that Whidbey debuggers are updated first.
+ fDispatchUMChain = true;
+ }
+ }
+ else
+ {
+ // If it's a real method (not just an EE Frame), then the UM chain is over.
+ fDispatchUMChain = true;
+ }
+
+
+ if (fDispatchUMChain)
+ {
+ // Check if we should cancel the UM chain.
+
+ // We need to discriminate between the following 2 cases:
+ // 1) Managed -(a)-> mscorwks -(b)-> Managed (leaf)
+ // 2) Native -(a)-> mscorwks -(b)-> Managed (leaf)
+ //
+ // --INCORRECT RATIONALE SEE "CORRECTION" BELOW--
+ // Case 1 could happen if a managed call injects a stub (such as w/ delegates).
+ // In both cases, the (mscorwks-(b)->managed) transition causes a IsNativeMarker callback
+ // which initiates a UM chain. In case 1, we want to cancel the UM chain, but
+ // in case 2 we want to dispatch it.
+ // The difference is case #2 will have some EE Frame at (b) and case #1 won't.
+ // That EE Frame should have caused us to dispatch the call for the managed method, and
+ // thus by the time we get around to dispatching the UM Chain, we shouldn't have a managed
+ // method waiting to be dispatched in the DebuggerFrameData.
+ // --END INCORRECT RATIONALE--
+ //
+ // This is kind of messed up. First of all, the assertion about case 2 is not true on 64-bit.
+ // We won't have an explicit frame at (b). Secondly, case 1 is not always true either.
+ // Consider the case where we are calling a cctor at prestub time. This is what the stack may
+ // look like: managed -> PrestubMethodFrame -> GCFrame -> managed (cctor) (leaf). In this case,
+ // we will actually send the UM chain because we will have dispatched the call for the managed
+ // method (the cctor) when we get a callback for the GCFrame.
+ //
+ // --INCORRECT SEE "CORRECTION" BELOW--
+ // Keep in mind that this is just a heuristic to reduce the number of UM chains we are sending
+ // over to the RS.
+ // --END INCORRECT --
+ //
+ // CORRECTION: These UM chains also feed into the results of at least ControllerStackInfo and probably other
+ // places. Issue 650903 is a concrete example of how not filtering a UM chain causes correctness
+ // issues in the LS. This code may still have bugs in it based on those incorrect assumptions.
+ // A narrow fix for 650903 is the only thing that was changed at the time of adding this comment.
+ if (d->needParentInfo && d->info.HasMethodFrame())
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "Cancelling UM Chain b/c it's internal\n"));
+ d->CancelUMChain();
+ return SWA_CONTINUE;
+ }
+
+ // If we're NOT ignoring non-method frames, and we didn't get an explicit ExitFrame somewhere
+ // in this chain, then don't send the non-leaf UM chain.
+ // The practical cause here is that w/o an exit frame, we don't know where the UM chain
+ // is starting (could be from anywhere in mscorwks). And we can't patch any random spot in
+ // mscorwks.
+ // Sending leaf-UM chains is OK b/c we can't step-out to them (they're the leaf, duh).
+ // (ignoreNonmethodFrames is generally false for stepping and true for regular
+ // end-user stacktraces.)
+ //
+ // This check is probably unnecessary. The client of the debugger stackwalker should make
+ // the decision themselves as to what to do with the UM chain callbacks.
+ //
+ // -- INCORRECT SEE "CORRECTION" BELOW --
+ // Currently, both
+ // ControllerStackInfo and InterceptorStackInfo ignore UM chains completely anyway.
+ // (For an example, refer to the cctor example in the previous comment.)
+ // -- END INCORRECT --
+ //
+ // CORRECTION: See issue 650903 for a concrete example of ControllerStackInfo getting a different
+ // result based on a UM chain that wasn't filtered. This code may still have issues in
+ // it based on those incorrect assumptions. A narrow fix for 650903 is the only thing
+ // that was changed at the time of adding this comment.
+ if (!d->fHitExitFrame && !d->ShouldIgnoreNonmethodFrames() && !d->IsLeafCallback())
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "Cancelling UM Chain b/c it's stepper not requested\n"));
+ d->CancelUMChain();
+ return SWA_CONTINUE;
+ }
+
+
+ // Ok, we haven't cancelled it yet, so go ahead and send the UM chain.
+ FrameInfo f;
+ FramePointer fpRoot = d->GetUMChainEnd();
+ FramePointer fpLeaf = GetSP(d->GetUMChainStartRD());
+
+ // If we didn't actually get any range, then don't bother sending it.
+ if (fpRoot == fpLeaf)
+ {
+ d->CancelUMChain();
+ return SWA_CONTINUE;
+ }
+
+ f.InitForUMChain(fpRoot, d->GetUMChainStartRD());
+
+#ifdef FEATURE_COMINTEROP
+ if ((frame != NULL) &&
+ (frame->GetVTablePtr() == ComPlusMethodFrame::GetMethodFrameVPtr()))
+ {
+ // This condition is part of the fix for 650903. (See
+ // code:ControllerStackInfo::WalkStack and code:DebuggerStepper::TrapStepOut
+ // for the other parts.) Here, we know that the frame we're looking at may be
+ // a ComPlusMethodFrameGeneric (this info is not otherwise plumbed down into
+ // the walker; even though the walker does get to see "f.frame", that may not
+ // be "frame"). Given this, if the walker chooses to ignore these frames
+ // (while doing a Step Out during managed-only debugging), then it can ignore
+ // this frame.
+ f.fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric = true;
+ }
+#endif // FEATURE_COMINTEROP
+
+ if (d->InvokeCallback(&f) == SWA_ABORT)
+ {
+ // don't need to cancel if they abort.
+ return SWA_ABORT;
+ }
+ d->CancelUMChain(); // now that we've sent it, we're done.
+
+
+ // Check for a M2U internal frame.
+ if (d->ShouldProvideInternalFrames() && (frame != NULL) && (frame != FRAME_TOP))
+ {
+ // We want to dispatch a M2U transition right after we dispatch the UM chain.
+ Frame::ETransitionType t = frame->GetTransitionType();
+ if (t == Frame::TT_M2U)
+ {
+ // Frame for a M2U transition.
+ FrameInfo fM2U;
+ fM2U.InitForM2UInternalFrame(pCF);
+ if (d->InvokeCallback(&fM2U) == SWA_ABORT)
+ {
+ return SWA_ABORT;
+ }
+ }
+ }
+
+
+ }
+
+ return SWA_CONTINUE;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// A frame pointer is a unique identifier for a particular stack location. This function returns the
+// frame pointer for the current frame, whether it is a method frame or an explicit frame.
+//
+// Arguments:
+// pData - the state of the current frame maintained by the debugger stackwalker
+// pCF - the CrawlFrame for the current callback by the real stackwalker (i.e. StackWalkFramesEx());
+// this is NULL for the case where we fake an extra callback to top off a debugger stackwalk
+//
+// Return Value:
+// the frame pointer for the current frame
+//
+
+FramePointer GetFramePointerForDebugger(DebuggerFrameData* pData, CrawlFrame* pCF)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ FramePointer fpResult;
+
+#if defined(WIN64EXCEPTIONS)
+ if (pData->info.frame == NULL)
+ {
+ // This is a managed method frame.
+ fpResult = FramePointer::MakeFramePointer((LPVOID)GetRegdisplayStackMark(&pData->info.registers));
+ }
+ else
+ {
+ // This is an actual frame.
+ fpResult = FramePointer::MakeFramePointer((LPVOID)(pData->info.frame));
+ }
+
+#else // !WIN64EXCEPTIONS
+ if ((pCF == NULL || !pCF->IsFrameless()) && pData->info.frame != NULL)
+ {
+ //
+ // If we're in an explicit frame now, and the previous frame was
+ // also an explicit frame, pPC will not have been updated. So
+ // use the address of the frame itself as fp.
+ //
+ fpResult = FramePointer::MakeFramePointer((LPVOID)(pData->info.frame));
+
+ LOG((LF_CORDB, LL_INFO100000, "GFPFD: Two explicit frames in a row; using frame address 0x%p\n",
+ pData->info.frame));
+ }
+ else
+ {
+ //
+ // Otherwise use pPC as the frame pointer, as this will be
+ // pointing to the return address on the stack.
+ //
+ fpResult = FramePointer::MakeFramePointer((LPVOID)GetRegdisplayStackMark(&(pData->regDisplay)));
+ }
+
+#endif // !WIN64EXCEPTIONS
+
+ LOG((LF_CORDB, LL_INFO100000, "GFPFD: Frame pointer is 0x%p\n", fpResult.GetSPValue()));
+
+ return fpResult;
+}
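+
+// A rough sketch of how the fp computed above is consumed (based on the pattern in
+// DebuggerWalkStackProc below; illustrative only, not additional logic). The frame
+// pointer for frame N can only be derived once the walker has advanced to frame N+1,
+// so the FrameInfo for N is stashed and its callback is deferred by one iteration via
+// the needParentInfo flag:
+//
+//   if (d->needParentInfo)                                   // frame N was stashed last time
+//   {
+//       d->info.fp = GetFramePointerForDebugger(d, pCF);     // pCF now describes frame N+1
+//       d->needParentInfo = false;
+//       if (d->InvokeCallback(&d->info) == SWA_ABORT)        // frame N is now complete
+//           return SWA_ABORT;
+//   }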
+
+
+#ifdef WIN64EXCEPTIONS
+//---------------------------------------------------------------------------------------
+//
+// This function is called to determine if we should start skipping funclets. If we should, then we return the
+// frame pointer for the parent method frame. Otherwise we return LEAF_MOST_FRAME. If we are already skipping
+// frames, then we return the current frame pointer for the parent method frame.
+//
+// The return value of this function corresponds to the return value of ExceptionTracker::FindParentStackFrame().
+// Refer to that function for more information.
+//
+// Arguments:
+// fpCurrentParentMarker - This is the current frame pointer of the parent method frame. It can be
+// LEAF_MOST_FRAME if we are not currently skipping funclets.
+// pCF - the CrawlFrame for the current callback from the real stackwalker
+// fIsNonFilterFuncletFrame - whether the current frame is a non-filter funclet frame
+//
+// Return Value:
+// LEAF_MOST_FRAME - skipping not required
+// ROOT_MOST_FRAME - skip one frame and try again
+// anything else - skip all frames up to but not including the returned frame pointer
+//
+
+inline FramePointer CheckForParentFP(FramePointer fpCurrentParentMarker, CrawlFrame* pCF, bool fIsNonFilterFuncletFrame)
+{
+ WRAPPER_NO_CONTRACT;
+
+ if (fpCurrentParentMarker == LEAF_MOST_FRAME)
+ {
+ // When we encounter a funclet, we simply stop processing frames until we hit the parent
+ // of the funclet. Funclets and their parents have the same MethodDesc pointers, and they
+ // should really be treated as one frame. However, we report both of them and let the callers
+ // decide what they want to do with them. For example, DebuggerThread::TraceAndSendStack()
+ // should never report both frames, but ControllerStackInfo::GetStackInfo() may need both to
+ // determine where to put a patch. We use the fpParent as a flag to indicate if we are
+ // searching for a parent of a funclet.
+ //
+ // Note that filter funclets are an exception. We don't skip them.
+ if (fIsNonFilterFuncletFrame)
+ {
+ // We really should be using the same structure, but FramePointer is used everywhere in the debugger......
+ StackFrame sfParent = g_pEEInterface->FindParentStackFrame(pCF);
+ return FramePointer::MakeFramePointer((LPVOID)sfParent.SP);
+ }
+ else
+ {
+ return LEAF_MOST_FRAME;
+ }
+ }
+ else
+ {
+ // Just return the current marker if we are already skipping frames.
+ return fpCurrentParentMarker;
+ }
+}
+#endif // WIN64EXCEPTIONS
+
+
+//-----------------------------------------------------------------------------
+// StackWalkAction DebuggerWalkStackProc(): This is the callback called
+// by the EE stackwalker.
+// Note that since we don't know what the frame pointer for frame
+// X is until we've looked at the caller of frame X, we actually end up
+// stashing the info and pData pointers in the DebuggerFrameData struct, and
+// then invoking pCallback when we've moved up one level, into the caller's
+// frame. We use the needParentInfo field to indicate that the previous frame
+// needed this (parental) info, and so when it's true we should invoke
+// pCallback.
+// What happens is this: if the previous frame set needParentInfo, then we
+// do pCallback (and set needParentInfo to false).
+// Then we look at the current frame - if it's frameless (ie,
+// managed), then we set needParentInfo so that we do the callback on the next frame.
+// Otherwise we must be at a chain boundary, and so we set the chain reason
+// appropriately. We then figure out what type of frame it is, setting
+// flags depending on the type. If the user should see this frame, then
+// we'll set needParentInfo to record its existence. Lastly, if we're in
+// a funky frame, we'll explicitly update the register set, since the
+// CrawlFrame doesn't do it automatically.
+//-----------------------------------------------------------------------------
+StackWalkAction DebuggerWalkStackProc(CrawlFrame *pCF, void *data)
+{
+ DebuggerFrameData *d = (DebuggerFrameData *)data;
+
+ if (pCF->IsNativeMarker())
+ {
+#ifdef WIN64EXCEPTIONS
+ // The tricky part here is that we want to skip all frames between a funclet method frame
+ // and the parent method frame UNLESS the funclet is a filter. Moreover, we should never
+ // let a native marker execute the rest of this method, so we just short-circuit it here.
+ if ((d->fpParent != LEAF_MOST_FRAME) || d->info.IsNonFilterFuncletFrame())
+ {
+ return SWA_CONTINUE;
+ }
+#endif // WIN64EXCEPTIONS
+
+ // This REGDISPLAY is for the native method immediately following the managed method for which
+ // we have received the previous callback, i.e. the native caller of the last managed method
+ // we have encountered.
+ REGDISPLAY* pRDSrc = pCF->GetRegisterSet();
+ d->BeginTrackingUMChain(GetSP(pRDSrc), pRDSrc);
+
+ return SWA_CONTINUE;
+ }
+
+ // Note that a CrawlFrame may have both a methoddesc & an EE Frame.
+ Frame *frame = g_pEEInterface->GetFrame(pCF);
+ MethodDesc *md = pCF->GetFunction();
+
+ LOG((LF_CORDB, LL_EVERYTHING, "Calling DebuggerWalkStackProc. Frame=0x%p, md=0x%p(%s), native_marker=%d\n",
+ frame, md, (md == NULL || md == (MethodDesc*)POISONC) ? "null" : md->m_pszDebugMethodName, pCF->IsNativeMarker() ));
+
+ // The fp for a frame must be obtained from the _next_ frame. Fill it in now for the previous frame, if appropriate.
+ if (d->needParentInfo)
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: NeedParentInfo.\n"));
+
+ d->info.fp = GetFramePointerForDebugger(d, pCF);
+
+#if defined(_DEBUG) && !defined(_TARGET_ARM_)
+ // Make sure the stackwalk is making progress.
+ // On ARM this is invalid as the stack pointer does not necessarily move when unwinding a frame.
+ _ASSERTE(IsCloserToLeaf(d->previousFP, d->info.fp));
+
+ d->previousFP = d->info.fp;
+#endif // _DEBUG && !_TARGET_ARM_
+
+ d->needParentInfo = false;
+
+ {
+ // Don't invoke Stubs if we're not asking for internal frames.
+ bool fDoInvoke = true;
+ if (!d->ShouldProvideInternalFrames())
+ {
+ if (d->info.HasStubFrame())
+ {
+ fDoInvoke = false;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO1000000, "DWSP: handling our target\n"));
+
+ if (fDoInvoke)
+ {
+ if (d->InvokeCallback(&d->info) == SWA_ABORT)
+ {
+ return SWA_ABORT;
+ }
+ }
+
+ // @todo - eventually we should be initing our frame-infos properly
+ // and thus should be able to remove this.
+ d->info.eStubFrameType = STUBFRAME_NONE;
+ }
+ } // if (d->needParentInfo)
+
+
+#ifdef WIN64EXCEPTIONS
+ // The tricky part here is that we want to skip all frames between a funclet method frame
+ // and the parent method frame UNLESS the funclet is a filter. The reason we only have to check fpParent
+ // here (instead of checking d->info.fIsFunclet and d->info.fIsFilter as well, as at the beginning of
+ // this method) is that at this point, fpParent has already been set by the code above.
+ if (d->fpParent == LEAF_MOST_FRAME)
+#endif // WIN64EXCEPTIONS
+ {
+ // Track the UM chain after we flush any managed goo from the last iteration.
+ if (TrackUMChain(pCF, d) == SWA_ABORT)
+ {
+ return SWA_ABORT;
+ }
+ }
+
+
+ // Track if we want to send a callback for this Frame / Method
+ bool use=false;
+
+ //
+ // Examine the frame.
+ //
+
+ // We assume that the stack walker is just updating the
+ // register display we passed in - assert it to be sure
+ _ASSERTE(pCF->GetRegisterSet() == &d->regDisplay);
+
+#ifdef WIN64EXCEPTIONS
+ Frame* pPrevFrame = d->info.frame;
+
+ // Here we need to determine if we are in a non-leaf frame, in which case we want to adjust the relative offset.
+ // Also, we need to check if this frame has faulted (thrown a native exception), since if it has, then it should be
+ // considered the leaf frame (and thus we don't need to update the relative offset).
+ if (pCF->IsActiveFrame() || pCF->HasFaulted())
+ {
+ d->info.fIsLeaf = true;
+ }
+ else if ( (pPrevFrame != NULL) &&
+ (pPrevFrame->GetFrameType() == Frame::TYPE_EXIT) &&
+ !HasExitRuntime(pPrevFrame, d, NULL) )
+ {
+ // This is for the inlined NDirectMethodFrameGeneric case. We have not exited the runtime yet, so the current
+ // frame should still be regarded as the leaf frame.
+ d->info.fIsLeaf = true;
+ }
+ else
+ {
+ d->info.fIsLeaf = false;
+ }
+
+ d->info.fIsFunclet = pCF->IsFunclet();
+ d->info.fIsFilter = false;
+ if (d->info.fIsFunclet)
+ {
+ d->info.fIsFilter = pCF->IsFilterFunclet();
+ }
+
+ if (pCF->IsFrameless())
+ {
+ // Check if we are skipping.
+ if (d->fpParent != LEAF_MOST_FRAME)
+ {
+ // If fpParent is ROOT_MOST_FRAME, then we just need to skip one frame. Otherwise, we should stop
+ // skipping if the current frame pointer matches fpParent. In either case, clear fpParent, and
+ // then check again.
+ if ((d->fpParent == ROOT_MOST_FRAME) ||
+ ExceptionTracker::IsUnwoundToTargetParentFrame(pCF, ConvertFPToStackFrame(d->fpParent)))
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Stopping to skip funclet at 0x%p.\n", d->fpParent.GetSPValue()));
+
+ d->fpParent = LEAF_MOST_FRAME;
+ d->fpParent = CheckForParentFP(d->fpParent, pCF, d->info.IsNonFilterFuncletFrame());
+ }
+ }
+ }
+
+#endif // WIN64EXCEPTIONS
+
+ d->info.frame = frame;
+ d->info.ambientSP = NULL;
+
+ // Record the appdomain that the thread was in when it
+ // was running code for this frame.
+ d->info.currentAppDomain = pCF->GetAppDomain();
+
+ // Grab all the info from CrawlFrame that we need to
+ // check for "Am I in an exception code blob?" now.
+
+#ifdef WIN64EXCEPTIONS
+ // We are still searching for the parent of the last funclet we encounter.
+ if (d->fpParent != LEAF_MOST_FRAME)
+ {
+ // We do nothing here.
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Skipping to parent method frame at 0x%p.\n", d->fpParent.GetSPValue()));
+ }
+ else
+#endif // WIN64EXCEPTIONS
+ // We should ignore IL stubs with no frames in our stackwalking.
+ // The only exception is dynamic methods. We want to report them when SIS is turned on.
+ if ((md != NULL) && md->IsILStub() && pCF->IsFrameless())
+ {
+#ifdef FEATURE_STUBS_AS_IL
+ if(md->AsDynamicMethodDesc()->IsMulticastStub())
+ {
+ use = true;
+ d->info.managed = true;
+ d->info.internal = false;
+ }
+#endif
+ // We do nothing here.
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Skip frameless IL stub.\n"));
+ }
+ else
+ // For frames w/o method data, send them as an internal stub frame.
+ if ((md != NULL) && md->IsDynamicMethod())
+ {
+ // Only Send the frame if "InternalFrames" are requested.
+ // Else completely ignore it.
+ if (d->ShouldProvideInternalFrames())
+ {
+ d->info.InitForDynamicMethod(pCF);
+
+ // We'll loop around to get the FramePointer. Only modification to FrameInfo
+ // after this is filling in framepointer and resetting MD.
+ use = true;
+ }
+ }
+ else if (pCF->IsFrameless())
+ {
+ // Regular managed-method.
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Is frameless.\n"));
+ use = true;
+ d->info.managed = true;
+ d->info.internal = false;
+ d->info.chainReason = CHAIN_NONE;
+ d->needParentInfo = true; // Possibly need chain reason
+ d->info.relOffset = AdjustRelOffset(pCF, &(d->info));
+ d->info.pIJM = pCF->GetJitManager();
+ d->info.MethodToken = pCF->GetMethodToken();
+
+#ifdef _TARGET_X86_
+ // This is collecting the ambientSP a lot more often than we actually need it. The only time we need it is
+ // when inspecting local vars that are based off the ambient esp.
+ d->info.ambientSP = pCF->GetAmbientSPFromCrawlFrame();
+#endif
+ }
+ else
+ {
+ d->info.pIJM = NULL;
+ d->info.MethodToken = METHODTOKEN(NULL, 0);
+
+ //
+ // Retrieve any interception info
+ //
+
+ // Each interception type in the switch statement below is associated with a chain reason.
+ // The other chain reasons are:
+ // CHAIN_INTERCEPTION - not used
+ // CHAIN_PROCESS_START - not used
+ // CHAIN_THREAD_START - thread start
+ // CHAIN_ENTER_MANAGED - managed chain
+ // CHAIN_ENTER_UNMANAGED - unmanaged chain
+ // CHAIN_DEBUGGER_EVAL - not used
+ // CHAIN_CONTEXT_SWITCH - not used
+ // CHAIN_FUNC_EVAL - funceval
+
+ switch (frame->GetInterception())
+ {
+ case Frame::INTERCEPTION_CLASS_INIT:
+ //
+ // Fall through
+ //
+
+ // V2 assumes that the only thing the prestub intercepts is the class constructor
+ case Frame::INTERCEPTION_PRESTUB:
+ d->info.chainReason = CHAIN_CLASS_INIT;
+ break;
+
+ case Frame::INTERCEPTION_EXCEPTION:
+ d->info.chainReason = CHAIN_EXCEPTION_FILTER;
+ break;
+
+ case Frame::INTERCEPTION_CONTEXT:
+ d->info.chainReason = CHAIN_CONTEXT_POLICY;
+ break;
+
+ case Frame::INTERCEPTION_SECURITY:
+ d->info.chainReason = CHAIN_SECURITY;
+ break;
+
+ default:
+ d->info.chainReason = CHAIN_NONE;
+ }
+
+ //
+ // Look at the frame type to figure out how to treat it.
+ //
+
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Chain reason is 0x%X.\n", d->info.chainReason));
+
+ switch (frame->GetFrameType())
+ {
+ case Frame::TYPE_ENTRY: // We now ignore entry + exit frames.
+ case Frame::TYPE_EXIT:
+ case Frame::TYPE_HELPER_METHOD_FRAME:
+ case Frame::TYPE_INTERNAL:
+
+ /* If we have a specific interception type, use it. However, if this
+ is the top-most frame (with a specific type), we can ignore it
+ and it won't appear in the stack-trace */
+#define INTERNAL_FRAME_ACTION(d, use) \
+ (d)->info.managed = true; \
+ (d)->info.internal = false; \
+ use = true
+
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_INTERNAL.\n"));
+ if (d->info.chainReason == CHAIN_NONE || pCF->IsActiveFrame())
+ {
+ use = false;
+ }
+ else
+ {
+ INTERNAL_FRAME_ACTION(d, use);
+ }
+ break;
+
+ case Frame::TYPE_INTERCEPTION:
+ case Frame::TYPE_SECURITY: // Security is a sub-type of interception
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_INTERCEPTION/TYPE_SECURITY.\n"));
+ d->info.managed = true;
+ d->info.internal = true;
+ use = true;
+ break;
+
+ case Frame::TYPE_CALL:
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_CALL.\n"));
+ // In V4, StubDispatchFrame is only used on 64-bit (and PPC?) but not on x86. x86 uses a
+ // different code path which sets up a HelperMethodFrame instead. In V4.5, x86 and ARM
+ // both use the 64-bit code path and they set up a StubDispatchFrame as well. This causes
+ // a problem in the debugger stackwalker (see Dev11 Issue 13229) since the two frame types
+ // are treated differently. More specifically, a StubDispatchFrame causes the debugger
+ // stackwalk to make an invalid callback, i.e. a callback which is not for a managed method,
+ // an explicit frame, or a chain.
+ //
+ // Ideally we would just change the StubDispatchFrame to behave like a HMF, but it's
+ // too big of a change for an in-place release. For now I'm just making surgical fixes in
+ // the debugger stackwalker. This may introduce behavioural changes on X64, but the
+ // chance of that is really small. StubDispatchFrame is only used in the virtual stub
+ // dispatch code path. It stays on the stack for a small time window and it's not likely to
+ // be on the stack while some managed methods closer to the leaf are on the stack. There is
+ // only one scenario I know of, and that's the repro for Dev11 13229, but that's for x86 only.
+ // The jitted code on X64 behaves differently.
+ //
+ // Note that there is a corresponding change in DacDbiInterfaceImpl::GetInternalFrameType().
+ if (frame->GetVTablePtr() == StubDispatchFrame::GetMethodFrameVPtr())
+ {
+ use = false;
+ }
+ else
+ {
+ d->info.managed = true;
+ d->info.internal = false;
+ use = true;
+ }
+ break;
+
+ case Frame::TYPE_FUNC_EVAL:
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_FUNC_EVAL.\n"));
+ d->info.managed = true;
+ d->info.internal = true;
+ // This is actually a nop. We reset the chain reason in InitForFuncEval() below.
+ // So is a FuncEvalFrame a chain or an internal frame?
+ d->info.chainReason = CHAIN_FUNC_EVAL;
+
+ {
+ // We only show a FuncEvalFrame if the funceval is not trying to abort the thread.
+ FuncEvalFrame *pFuncEvalFrame = static_cast<FuncEvalFrame *>(frame);
+ use = pFuncEvalFrame->ShowFrame() ? true : false;
+ }
+
+ // Send Internal frame. This is "inside" (leafmost) the chain, so we send it first
+ // since sending starts from the leaf.
+ if (use && d->ShouldProvideInternalFrames())
+ {
+ FrameInfo f;
+ f.InitForFuncEval(pCF);
+ if (d->InvokeCallback(&f) == SWA_ABORT)
+ {
+ return SWA_ABORT;
+ }
+ }
+
+ break;
+
+ // Put frames we want to ignore here:
+ case Frame::TYPE_MULTICAST:
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_MULTICAST.\n"));
+ if (d->ShouldIgnoreNonmethodFrames())
+ {
+ // Multicast frames exist only to gc protect the arguments
+ // between invocations of a delegate. They don't have code that
+ // we can (currently) show the user (we could change this with
+ // work, but why bother? It's an internal stub, and even if the
+ // user could see it, they can't modify it).
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Skipping frame 0x%x b/c it's "
+ "a multicast frame!\n", frame));
+ use = false;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: NOT Skipping frame 0x%x even thought it's "
+ "a multicast frame!\n", frame));
+ INTERNAL_FRAME_ACTION(d, use);
+ }
+ break;
+
+#ifdef FEATURE_REMOTING
+ case Frame::TYPE_TP_METHOD_FRAME:
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Frame type is TYPE_TP_METHOD_FRAME.\n"));
+ if (d->ShouldIgnoreNonmethodFrames())
+ {
+ // Transparent Proxies push a frame onto the stack that they
+ // use to figure out where they're really going; this frame
+ // doesn't actually contain any code, although it does have
+ // enough info to fool our routines into thinking it does:
+ // Just ignore these.
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Skipping frame 0x%x b/c it's "
+ "a transparant proxy frame!\n", frame));
+ use = false;
+ }
+ else
+ {
+ // Otherwise do the same thing as for internal frames
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: NOT Skipping frame 0x%x even though it's "
+ "a transparant proxy frame!\n", frame));
+ INTERNAL_FRAME_ACTION(d, use);
+ }
+ break;
+#endif
+ default:
+ _ASSERTE(!"Invalid frame type!");
+ break;
+ }
+ }
+
+
+ // Check for ICorDebugInternalFrame stuff.
+ // These callbacks are dispatched out of band.
+ if (d->ShouldProvideInternalFrames() && (frame != NULL) && (frame != FRAME_TOP))
+ {
+ Frame::ETransitionType t = frame->GetTransitionType();
+ FrameInfo f;
+ bool fUse = false;
+
+ if (t == Frame::TT_U2M)
+ {
+ // We can invoke the Internal U2M frame now.
+ f.InitForU2MInternalFrame(pCF);
+ fUse = true;
+ }
+ else if (t == Frame::TT_AppDomain)
+ {
+ // Internal frame for an Appdomain transition.
+ // We used to ignore frames for ADs which we hadn't sent a Create event for yet. In V3 we send AppDomain
+ // create events immediately (before any assemblies are loaded), so this should no longer be an issue.
+ f.InitForADTransition(pCF);
+ fUse = true;
+ }
+
+ // Frame's setup. Now invoke the callback.
+ if (fUse)
+ {
+ if (d->InvokeCallback(&f) == SWA_ABORT)
+ {
+ return SWA_ABORT;
+ }
+ }
+ } // should we give frames?
+
+
+
+ if (use)
+ {
+ //
+ // If we are returning a complete stack walk from the helper thread, then we
+ // need to gather information to instantiate generics. However, a stepper doing
+ // a stackwalk does not need this information, so skip in that case.
+ //
+ if (d->ShouldIgnoreNonmethodFrames())
+ {
+ // Finding sizes of value types on the argument stack while
+ // looking for the arg runs the class loader in non-load mode.
+ ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
+ d->info.exactGenericArgsToken = pCF->GetExactGenericArgsToken();
+ }
+ else
+ {
+ d->info.exactGenericArgsToken = NULL;
+ }
+
+ d->info.md = md;
+ CopyREGDISPLAY(&(d->info.registers), &(d->regDisplay));
+
+#if defined(_TARGET_AMD64_)
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Saving REGDISPLAY with sp = 0x%p, pc = 0x%p.\n",
+ GetRegdisplaySP(&(d->info.registers)),
+ GetControlPC(&(d->info.registers))));
+#endif // _TARGET_AMD64_
+
+ d->needParentInfo = true;
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: Setting needParentInfo\n"));
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ d->fpParent = CheckForParentFP(d->fpParent, pCF, d->info.IsNonFilterFuncletFrame());
+#endif // WIN64EXCEPTIONS
+
+ //
+ // The stackwalker doesn't update the register set for the
+ // case where a non-frameless frame is returning to another
+ // non-frameless frame. Cover this case.
+ //
+ // !!! This assumes that updating the register set multiple times
+ // for a given frame is not a bad thing...
+ //
+ if (!pCF->IsFrameless())
+ {
+ LOG((LF_CORDB, LL_INFO100000, "DWSP: updating regdisplay.\n"));
+ pCF->GetFrame()->UpdateRegDisplay(&d->regDisplay);
+ }
+
+ return SWA_CONTINUE;
+}
+
+#if defined(_TARGET_X86_) && defined(FEATURE_INTEROP_DEBUGGING)
+// Helper to get the Wait-Sleep-Join bit from the thread
+bool IsInWaitSleepJoin(Thread * pThread)
+{
+ // Partial User state is sufficient because that has the bit we're checking against.
+ CorDebugUserState cts = g_pEEInterface->GetPartialUserState(pThread);
+ return ((cts & USER_WAIT_SLEEP_JOIN) != 0);
+}
+
+//-----------------------------------------------------------------------------
+// Decide if we should send an UM leaf chain.
+// This goes through a bunch of heuristics.
+// The driving guidelines here are:
+// - we try not to send an UM chain if it's just internal mscorwks stuff
+// and we know it can't have native user code.
+// (ex, anything beyond a filter context, various hijacks, etc).
+// - If it may have native user code, we send it anyway.
+//-----------------------------------------------------------------------------
+bool ShouldSendUMLeafChain(Thread * pThread)
+{
+ // If we're in shutdown, don't bother trying to sniff for an UM leaf chain.
+ // @todo - we'd like to never even be trying to stack trace on shutdown, this
+ // comes up when we do helper thread duty on shutdown.
+ if (g_fProcessDetach)
+ {
+ return false;
+ }
+
+ if (pThread->IsUnstarted() || pThread->IsDead())
+ {
+ return false;
+ }
+
+ // If a thread is suspended for sync purposes, it was suspended from managed
+ // code and the only native code is a mscorwks hijack.
+ // There are a few caveats here:
+ // - This means a thread will lose its UM chain. But what if a user inactive thread
+ // enters the CLR from native code and hits a GC toggle? We'll lose that entire
+ // UM chain.
+ // - at a managed-only stop, preemptive threads are still live. Thus a thread
+ // may not have this state set, run a little, try to enter the GC, and then get
+ // this state set. So we'll lose the UM chain right out from under our noses.
+ Thread::ThreadState ts = pThread->GetSnapshotState();
+ if ((ts & Thread::TS_SyncSuspended) != 0)
+ {
+ // If we've been stopped inside the runtime (eg, at a gc-toggle) but
+ // not actually at a stopping context, then the thread must have some
+ // leaf frames in mscorwks.
+ // We can detect this case by checking if GetManagedStoppedCtx(pThread) == NULL.
+ // This is very significant for notifications (like LogMessage) that are
+ // dispatched from within mscorwks w/o a filter context.
+ // We don't send a UM chain for these cases because that would
+ // cause managed debug events to be dispatched w/ UM chains on the callstack.
+ // And that just seems wrong ...
+
+ return false;
+ }
+
+#ifdef FEATURE_HIJACK
+ // Hijacked is only on non-pal.
+ if ((ts & Thread::TS_Hijacked) != 0)
+ {
+ return false;
+ }
+#endif
+
+ // This is pretty subjective. If we have a thread stopped in a managed sleep,
+ // managed wait, or managed join, then don't bother showing the native end of the
+ // stack. This check can be removed w/o impacting correctness.
+ // @todo - may be a problem if Sleep/Wait/Join go through a hosting interface
+ // which lands us in native user code.
+ // Partial User state is sufficient because that has the bit we're checking against.
+ if (IsInWaitSleepJoin(pThread))
+ {
+ return false;
+ }
+
+ // If we're tracing ourselves, we must be in managed code.
+ // Native user code can't initiate a managed stackwalk.
+ if (pThread == GetThread())
+ {
+ return false;
+ }
+
+ return true;
+}
+
+//-----------------------------------------------------------------------------
+// Prepare a Leaf UM chain. This assumes we should send an UM leaf chain.
+// Returns true if we actually prep for an UM leaf,
+// false if we don't.
+//-----------------------------------------------------------------------------
+bool PrepareLeafUMChain(DebuggerFrameData * pData, CONTEXT * pCtxTemp)
+{
+ // Get the current user context (depends on if we're the active thread or not).
+ Thread * thread = pData->GetThread();
+ REGDISPLAY * pRDSrc = NULL;
+ REGDISPLAY rdTemp;
+
+
+#ifdef _DEBUG
+ // Anybody stopped at a native debug event (and hijacked) should have a filter ctx.
+ if (thread->GetInteropDebuggingHijacked() && (thread->GetFrame() != NULL) && (thread->GetFrame() != FRAME_TOP))
+ {
+ _ASSERTE(g_pEEInterface->GetThreadFilterContext(thread) != NULL);
+ }
+#endif
+
+ // If we're hijacked, then we assume we're in native code. This covers the active thread case.
+ if (g_pEEInterface->GetThreadFilterContext(thread) != NULL)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DWS - sending special case UM Chain.\n"));
+
+ // This will get it from the filter ctx.
+ pRDSrc = &(pData->regDisplay);
+ }
+ else
+ {
+ // For inactive thread, we may not be hijacked. So just get the current ctx.
+ // This will use a filter ctx if we have one.
+ // We may suspend a thread in native code w/o hijacking it, so it's still at its live context.
+ // This can happen when we get a debug event on 1 thread; and then switch to look at another thread.
+ // This is very common when debugging apps w/ cross-thread causality (including COM STA objects)
+ pRDSrc = &rdTemp;
+
+ bool fOk;
+
+
+ // We need to get thread's context (InitRegDisplay will do that under the covers).
+ // If this is our thread, we're in bad shape. Fortunately that should never happen.
+ _ASSERTE(thread != GetThread());
+
+ Thread::SuspendThreadResult str = thread->SuspendThread();
+ if (str != Thread::STR_Success)
+ {
+ return false;
+ }
+
+ // @todo - this context is less important because the RS will overwrite it with the live context.
+ // We don't need to even bother getting it. We can just initialize the regdisplay w/ a sentinel.
+ fOk = g_pEEInterface->InitRegDisplay(thread, pRDSrc, pCtxTemp, false);
+ thread->ResumeThread();
+
+ if (!fOk)
+ {
+ return false;
+ }
+ }
+
+ // By now we have a Regdisplay from somewhere (filter ctx, current ctx, etc).
+ _ASSERTE(pRDSrc != NULL);
+
+ // If we're stopped in mscorwks (b/c of a handler for a managed BP), then the filter ctx will
+ // still be set out in jitted code.
+ // If our regdisplay is out in UM code, then send a UM chain.
+ BYTE* ip = (BYTE*) GetControlPC(pRDSrc);
+ if (g_pEEInterface->IsManagedNativeCode(ip))
+ {
+ return false;
+ }
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DWS - sending leaf UM Chain.\n"));
+
+ // Get the ending fp. We may not have any managed goo on the stack (eg, native thread called
+ // into a managed method and then returned from it).
+ FramePointer fpRoot;
+ Frame * pFrame = thread->GetFrame();
+ if ((pFrame != NULL) && (pFrame != FRAME_TOP))
+ {
+ fpRoot = FramePointer::MakeFramePointer((void*) pFrame);
+ }
+ else
+ {
+ fpRoot = ROOT_MOST_FRAME;
+ }
+
+
+ // Start tracking an UM chain. We won't actually send the UM chain until
+ // we hit managed code. Since this is the leaf, we don't need to send an
+ // Enter-Managed chain either.
+ pData->BeginTrackingUMChain(fpRoot, pRDSrc);
+
+ return true;
+}
+#endif // defined(_TARGET_X86_) && defined(FEATURE_INTEROP_DEBUGGING)
+
+//-----------------------------------------------------------------------------
+// Entry function for the debugger's stackwalking layer.
+// This will invoke pCallback(FrameInfo * pInfo, pData) for each 'frame'
+//-----------------------------------------------------------------------------
+StackWalkAction DebuggerWalkStack(Thread *thread,
+ FramePointer targetFP,
+ CONTEXT *context,
+ BOOL contextValid,
+ DebuggerStackCallback pCallback,
+ void *pData,
+ BOOL fIgnoreNonmethodFrames)
+{
+ _ASSERTE(context != NULL);
+
+ DebuggerFrameData data;
+
+ StackWalkAction result = SWA_CONTINUE;
+ bool fRegInit = false;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DebuggerWalkStack called\n"));
+
+ if(contextValid || g_pEEInterface->GetThreadFilterContext(thread) != NULL)
+ {
+ fRegInit = g_pEEInterface->InitRegDisplay(thread, &data.regDisplay, context, contextValid != 0);
+ _ASSERTE(fRegInit);
+ }
+
+ if (!fRegInit)
+ {
+#if defined(CONTEXT_EXTENDED_REGISTERS)
+
+ // Note: the size of a CONTEXT record contains the extended registers, but the context pointer we're given
+ // here may not have room for them. Therefore, we only set the non-extended part of the context to 0.
+ memset((void *)context, 0, offsetof(CONTEXT, ExtendedRegisters));
+#else
+ memset((void *)context, 0, sizeof(CONTEXT));
+#endif
+ memset((void *)&data, 0, sizeof(data));
+
+#if defined(_TARGET_X86_)
+ // @todo - this seems pointless. context->Eip will be 0; and when we copy it over to the DebuggerRD,
+ // the context will be completely null.
+ data.regDisplay.ControlPC = context->Eip;
+ data.regDisplay.PCTAddr = (TADDR)&(context->Eip);
+
+#elif defined(_WIN64) || defined(_TARGET_ARM_)
+ //
+ // @TODO: this should be the code for all platforms now that it uses FillRegDisplay,
+ // which encapsulates the platform variances. This could all be avoided if we used
+ // StackWalkFrames instead of StackWalkFramesEx.
+ //
+ ::SetIP(context, 0);
+ ::SetSP(context, 0);
+ FillRegDisplay(&data.regDisplay, context);
+
+ ::SetSP(data.regDisplay.pCallerContext, 0);
+#else
+ PORTABILITY_ASSERT("DebuggerWalkStack needs extended register information on this platform.");
+
+#endif
+ }
+
+ data.Init(thread, targetFP, fIgnoreNonmethodFrames, pCallback, pData);
+
+
+#if defined(_TARGET_X86_) && defined(FEATURE_INTEROP_DEBUGGING)
+ CONTEXT ctxTemp; // Temp context for Leaf UM chain. Need it here so that it stays alive for whole stackwalk.
+
+ // Important case for Interop Debugging -
+ // We may be stopped in Native Code (perhaps at a BP) w/ no Transition frame on the stack!
+ // We still need to send an UM Chain for this case.
+ if (ShouldSendUMLeafChain(thread))
+ {
+ // It's possible this may fail (eg, GetContext fails on win9x), so we're not guaranteed
+ // to be sending an UM chain even though we want to.
+ PrepareLeafUMChain(&data, &ctxTemp);
+
+ }
+#endif // defined(_TARGET_X86_) && defined(FEATURE_INTEROP_DEBUGGING)
+
+ if ((result != SWA_FAILED) && !thread->IsUnstarted() && !thread->IsDead())
+ {
+ int flags = 0;
+
+ result = g_pEEInterface->StackWalkFramesEx(thread, &data.regDisplay,
+ DebuggerWalkStackProc,
+ &data, flags | HANDLESKIPPEDFRAMES | NOTIFY_ON_U2M_TRANSITIONS | ALLOW_ASYNC_STACK_WALK);
+ }
+ else
+ {
+ result = SWA_DONE;
+ }
+
+ if (result == SWA_DONE || result == SWA_FAILED) // SWA_FAILED if no frames
+ {
+ // Since Debugger StackWalk callbacks are delayed 1 frame from EE stackwalk callbacks, we
+ // have to touch up the 1 leftover here.
+ //
+ // This is safe only because we use the REGDISPLAY of the native marker callback for any subsequent
+ // explicit frames which do not update the REGDISPLAY. It's kind of fragile. If we can change
+ // the x86 real stackwalker to unwind one frame ahead of time, we can get rid of this code.
+ if (data.needParentInfo)
+ {
+ data.info.fp = GetFramePointerForDebugger(&data, NULL);
+
+ if (data.InvokeCallback(&data.info) == SWA_ABORT)
+ {
+ return SWA_ABORT;
+ }
+ }
+
+ //
+ // Top off the stack trace as necessary w/ a thread-start chain.
+ //
+ REGDISPLAY * pRegDisplay = &(data.regDisplay);
+ if (data.IsTrackingUMChain())
+ {
+ // This is the common case b/c managed code gets called from native code.
+ pRegDisplay = data.GetUMChainStartRD();
+ }
+
+
+ // All Thread starts in unmanaged code (at something like kernel32!BaseThreadStart),
+ // so all ThreadStart chains must be unmanaged.
+ // InvokeCallback will fabricate the EnterManaged chain if we haven't already sent one.
+ data.info.InitForThreadStart(thread, pRegDisplay);
+ result = data.InvokeCallback(&data.info);
+
+ }
+ return result;
+}
diff --git a/src/debug/ee/frameinfo.h b/src/debug/ee/frameinfo.h
new file mode 100644
index 0000000000..e696d11976
--- /dev/null
+++ b/src/debug/ee/frameinfo.h
@@ -0,0 +1,210 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: frameinfo.h
+//
+
+//
+// Debugger stack walker
+//
+//*****************************************************************************
+
+#ifndef FRAMEINFO_H_
+#define FRAMEINFO_H_
+
+/* ========================================================================= */
+
+/* ------------------------------------------------------------------------- *
+ * Classes
+ * ------------------------------------------------------------------------- */
+
+class DebuggerJitInfo;
+
+// struct FrameInfo: Contains the information that will be handed to
+// DebuggerStackCallback functions (along with their own, individual
+// pData pointers).
+//
+// Frame *frame: The current explicit frame. NULL implies that
+// the method frame is frameless, meaning either unmanaged or managed. This
+// is set to be FRAME_TOP (0xFFffFFff) if the frame is the topmost, EE
+// placed frame.
+//
+// MethodDesc *md: MethodDesc for the method that's
+// executing in this method frame. Will be NULL if there is no MethodDesc.
+// If we're in generic code this may be a representative (i.e. canonical)
+// MD, and extra information is available in the exactGenericArgsToken.
+// For explicit frames, this may point to the method the explicit frame refers to
+// (i.e. the method being jitted, or the interface method being called through
+// COM interop), however it must always point to a method within the same
+// domain of the explicit frame. Therefore, it is not used to point to the target of
+// FuncEval frames since the target may be in a different domain.
+//
+// void *fp: frame pointer. Actually filled in from
+// caller (parent) frame, so the DebuggerStackWalkProc must delay
+// the user callback for one frame. This is not technically necessary on WIN64, but
+// we follow the x86 model to keep things simpler. We should really consider changing
+// the real stackwalker on x86 to unwind one frame ahead of time like the 64-bit one.
+struct FrameInfo
+{
+public:
+ Frame *frame;
+ MethodDesc *md;
+
+ // the register set of the frame being reported
+ REGDISPLAY registers;
+ FramePointer fp;
+
+ // This field is propagated to the right side to become CordbRegisterSet::m_quicklyUnwind.
+ // If it is true, then the registers reported in the REGDISPLAY are invalid. It is only set to
+ // true in InitForEnterManagedChain(). In that case, we are passing a NULL REGDISPLAY anyway.
+ // This is such a misnomer.
+ bool quickUnwind;
+
+ // Set to true if we are dealing with an internal explicit frame. Currently this is only true
+ // for prestub frames, security frames, funceval frames, and certain debugger-specific frames
+ // (e.g. DebuggerClassInitMarkFrame, DebuggerSecurityCodeMarkFrame).
+ // This affects HasMethodFrame() below.
+ bool internal;
+
+ // whether the state contained in the FrameInfo represents a managed or unmanaged method frame/stub/chain;
+ // corresponds to ICorDebugChain::IsManaged()
+ bool managed;
+
+ // Native offset from beginning of the method.
+ ULONG relOffset;
+
+ // The ambient stackpointer. This can be used to compute esp-relative local variables,
+ // which can be common in frameless methods.
+ TADDR ambientSP;
+
+ // These two fields are only set for managed method frames.
+ IJitManager *pIJM;
+ METHODTOKEN MethodToken;
+
+ // This represents the current domain of the frame itself, and which
+ // the method specified by 'md' is executing in.
+ AppDomain *currentAppDomain;
+
+ // only set for stackwalking, not stepping
+ void *exactGenericArgsToken;
+
+#if defined(WIN64EXCEPTIONS)
+ // This field is only used on IA64 to determine which registers are available and
+ // whether we need to adjust the IP.
+ bool fIsLeaf;
+
+ // These two fields are used for funclets.
+ bool fIsFunclet;
+ bool fIsFilter;
+
+ bool IsFuncletFrame() { return fIsFunclet; }
+ bool IsFilterFrame() { return fIsFilter; }
+ bool IsNonFilterFuncletFrame() { return (fIsFunclet && !fIsFilter); }
+#endif // WIN64EXCEPTIONS
+
+
+ // A ridiculous flag that is targeting a very narrow fix at issue 650903 (4.5.1/Blue).
+ // This is set when the currently walked frame is a ComPlusMethodFrameGeneric. If the
+ // dude doing the walking is trying to ignore such frames (see
+ // code:ControllerStackInfo::m_suppressUMChainFromComPlusMethodFrameGeneric), AND
+ // this is set, then the walker just continues on to the next frame, without
+ // erroneously identifying this frame as the target frame. Only used during "Step
+ // Out" to a managed frame (i.e., managed-only debugging).
+ bool fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric;
+
+ // In addition to a Method, a FrameInfo may also represent either a Chain or a Stub (but not both).
+ // chainReason corresponds to ICorDebugChain::GetReason().
+ CorDebugChainReason chainReason;
+ CorDebugInternalFrameType eStubFrameType;
+
+ // Helpers for initializing a FrameInfo for a chain or a stub frame.
+ void InitForM2UInternalFrame(CrawlFrame * pCF);
+ void InitForU2MInternalFrame(CrawlFrame * pCF);
+ void InitForADTransition(CrawlFrame * pCF);
+ void InitForDynamicMethod(CrawlFrame * pCF);
+ void InitForFuncEval(CrawlFrame * pCF);
+ void InitForThreadStart(Thread *thread, REGDISPLAY * pRDSrc);
+ void InitForUMChain(FramePointer fpRoot, REGDISPLAY * pRDSrc);
+ void InitForEnterManagedChain(FramePointer fpRoot);
+
+ // Does this FrameInfo represent a method frame? (aka a frameless frame)
+ // This may be combined w/ both StubFrames and ChainMarkers.
+ bool HasMethodFrame() { return md != NULL && !internal; }
+
+ // Is this frame for a stub?
+ // This is mutually exclusive w/ Chain Markers.
+ // StubFrames may also have a method frame as a "hint". Ex, a stub frame for a
+ // M2U transition may have the Method for the Managed Wrapper for the unmanaged call.
+ // Stub frames map to internal frames on the RS. They use the same enum
+ // (CorDebugInternalFrameType) to represent the type of the frame.
+ bool HasStubFrame() { return eStubFrameType != STUBFRAME_NONE; }
+
+ // Does this FrameInfo mark the start of a new chain? (A Frame info may both
+ // start a chain and represent a method)
+ bool HasChainMarker() { return chainReason != CHAIN_NONE; }
+
+ // Helper functions for retrieving the DJI and the DMI
+ DebuggerJitInfo * GetJitInfoFromFrame();
+ DebuggerMethodInfo * GetMethodInfoFromFrameOrThrow();
+
+ // Debug helper which nops in retail; and asserts invariants in debug.
+#ifdef _DEBUG
+ void AssertValid();
+
+ // Debug helpers to get name of frame. Useful in asserts + log statements.
+ LPCUTF8 DbgGetClassName();
+ LPCUTF8 DbgGetMethodName();
+#endif
+
+protected:
+ // These are common internal helpers shared by the other Init*() helpers above.
+ void InitForScratchFrameInfo();
+ void InitFromStubHelper(CrawlFrame * pCF, MethodDesc * pMDHint, CorDebugInternalFrameType type);
+
+};
+
+//StackWalkAction (*DebuggerStackCallback): This callback will
+// be invoked by DebuggerWalkStackProc at each method frame and explicit frame, passing the FrameInfo
+// and callback-defined pData to the method. The callback then returns a
+// SWA - if SWA_ABORT is returned then the walk stops immediately. If
+// SWA_CONTINUE is called, then the frame is walked & the next higher frame
+// will be used. If the current explicit frame is at the root of the stack, then
+// in the next iteration, DSC will be invoked with FrameInfo::frame == FRAME_TOP
+typedef StackWalkAction (*DebuggerStackCallback)(FrameInfo *frame, void *pData);
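+
+// A minimal illustrative sketch (not part of this header) of a callback matching
+// the typedef above; the name CountMethodFramesCallback and the counting logic are
+// hypothetical and only show how the FrameInfo predicates are typically consulted:
+//
+//   StackWalkAction CountMethodFramesCallback(FrameInfo *pInfo, void *pData)
+//   {
+//       // pData is caller-supplied; here it is assumed to point at an int counter.
+//       int *pCount = reinterpret_cast<int *>(pData);
+//
+//       if (pInfo->HasMethodFrame())
+//       {
+//           (*pCount)++;          // count real method frames, not stubs or chain markers
+//       }
+//
+//       return SWA_CONTINUE;      // SWA_ABORT would stop the walk immediately
+//   }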
+
+//StackWalkAction DebuggerWalkStack(): Sets up everything for a
+// stack walk for the debugger, starts the stack walk (via
+// g_pEEInterface->StackWalkFramesEx), then massages the output. Note that it
+// takes a DebuggerStackCallback as an argument, but at each method frame and explicit frame
+// DebuggerWalkStackProc gets called, which in turn calls the
+// DebuggerStackCallback.
+// Thread * thread: the thread on which to do a stackwalk
+// void *targetFP: If you're looking for a specific frame, then
+// this should be set to the fp for that frame, and the callback won't
+// be called until that frame is reached. Otherwise, set it to LEAF_MOST_FRAME &
+// the callback will be called on every frame.
+// CONTEXT *context: Never NULL, b/c the callbacks require the
+// CONTEXT as a place to store some information. Either it points to an
+// uninitialized CONTEXT (contextValid should be false), or
+// a pointer to a valid CONTEXT for the thread. If it's uninitialized, InitRegDisplay
+// will fill it in for us, so we shouldn't go out of our way to set this up.
+// bool contextValid: TRUE if context points to a valid CONTEXT, FALSE
+// otherwise.
+// DebuggerStackCallback pCallback: User supplied callback to
+// be invoked at every frame that's at targetFP or higher.
+// void *pData: User supplied data that we shuffle around,
+// and then hand to pCallback.
+// BOOL fIgnoreNonmethodFrames: Generally true for end user stackwalking (e.g. displaying a stack trace) and
+// false for stepping (e.g. stepping out).
+
+StackWalkAction DebuggerWalkStack(Thread *thread,
+ FramePointer targetFP,
+ T_CONTEXT *pContext,
+ BOOL contextValid,
+ DebuggerStackCallback pCallback,
+ void *pData,
+ BOOL fIgnoreNonmethodFrames);
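+
+// A minimal usage sketch, assuming the hypothetical CountMethodFramesCallback from
+// the comment above and a caller that already holds a Thread * plus a scratch
+// CONTEXT it does not mind being overwritten:
+//
+//   int cFrames = 0;
+//   T_CONTEXT ctx;                               // deliberately left uninitialized
+//   StackWalkAction res = DebuggerWalkStack(pThread,
+//                                           LEAF_MOST_FRAME,            // visit every frame
+//                                           &ctx,
+//                                           FALSE,                      // ctx is not valid yet
+//                                           CountMethodFramesCallback,
+//                                           &cFrames,
+//                                           TRUE);                      // ignore non-method frames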
+
+#endif // FRAMEINFO_H_
diff --git a/src/debug/ee/funceval.cpp b/src/debug/ee/funceval.cpp
new file mode 100644
index 0000000000..93f5dcdc60
--- /dev/null
+++ b/src/debug/ee/funceval.cpp
@@ -0,0 +1,3990 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+// ****************************************************************************
+// File: funceval.cpp
+//
+
+//
+// funceval.cpp - Debugger func-eval routines.
+//
+// ****************************************************************************
+// Putting code & #includes, #defines, etc, before the stdafx.h will
+// cause the code,etc, to be silently ignored
+
+
+#include "stdafx.h"
+#include "debugdebugger.h"
+#include "ipcmanagerinterface.h"
+#include "../inc/common.h"
+#include "perflog.h"
+#include "eeconfig.h" // This is here even for retail & free builds...
+#include "../../dlls/mscorrc/resource.h"
+
+#ifdef FEATURE_REMOTING
+#include "remoting.h"
+#endif
+
+#include "context.h"
+#include "vars.hpp"
+#include "threads.h"
+#include "appdomain.inl"
+#include <limits.h>
+#include "ilformatter.h"
+
+#ifndef DACCESS_COMPILE
+
+//
+// This is the main file for processing func-evals. Nestle in
+// with a cup o' tea and read on.
+//
+// The most common case is handled in GCProtectArgsAndDoNormalFuncEval(), which follows
+// all the comments below. The two other corner cases are handled in
+// FuncEvalHijackWorker(), and are extremely straightforward.
+//
+// There are several steps to successfully processing a func-eval. At a
+// very high level, the first step is to gather all the information necessary
+// to make the call (specifically, gather arg info and method info); the second
+// step is to actually make the call to managed code; finally, the third step
+// is to take all results and unpackage them.
+//
+// The first step (gathering arg and method info) has several critical passes that
+// must be made.
+// a) Protect all passed in args from a GC.
+// b) Transition into the appropriate AppDomain if necessary
+// c) Pre-allocate object for 'new' calls and, if necessary, box the 'this' argument. (May cause a GC)
+// d) Gather method info (May cause GC)
+// e) Gather info from runtime about args. (May cause a GC)
+// f) Box args that need to be, GC-protecting the newly boxed items. (May cause a GC)
+// g) Pre-allocate object for return values. (May cause a GC)
+// h) Copy to pBufferForArgsArray all the args. This array is used to hold values that
+// may need writable memory for ByRef args.
+// i) Create and load pArgumentArray to be passed as the stack for the managed call.
+// NOTE: From the time we load the first argument into the stack we cannot cause a GC
+// as the argument array cannot be GC-protected.
+//
+// The second step (Making the managed call), is relatively easy, and is a single call.
+//
+// The third step (unpacking all results), has a couple of passes as well.
+// a) Copy back all resulting values.
+// b) Free all temporary work memory.
+//
+//
+// The most difficult part of doing a func-eval is the first step, since once you
+// have everything set up, unpacking and calling are reverse, gc-safe, operations. Thus,
+// elaboration is needed on the first step.
+//
+// a) Protect all passed in args from a GC. This must be done in a gc-forbid region,
+// and the code path to this function must not trigger a gc either. In this function five
+// parallel arrays are used: pObjectRefArray, pMaybeInteriorPtrArray, pByRefMaybeInteriorPtrArray,
+// pBufferForArgsArray, and pArguments.
+// pObjectRefArray is used to gc-protect all arguments and results that are objects.
+// pMaybeInteriorPtrArray is used to gc-protect all arguments that might be pointers
+// to an interior of a managed object.
+// pByRefMaybeInteriorPtrArray is similar to pMaybeInteriorPtrArray, except that it protects the
+// address of the arguments instead of the arguments themselves. This is needed because we may have
+// by ref arguments whose address is an interior pointer into the GC heap.
+// pBufferForArgsArray is used strictly as a buffer for copying primitives
+// that need to be passed as ByRef, or may be enregistered. This array also holds
+// handles.
+// These protection arrays are mutually exclusive; that is, if there is an entry
+// in one array at index i, there should be no entry in any of the other arrays at
+// the same index.
+// pArguments is used as the complete array of arguments to pass to the managed function.
+//
+// Unfortunately the necessary information to complete pass (a) perfectly may cause a gc, so
+// instead, pass (a) is over-aggressive and protects the following: All object refs into
+// pObjectRefArray, and puts all values that could be raw pointers into pMaybeInteriorPtrArray.
+//
+// b) Discovers the method to be called, and if it is a 'new' allocate an object for the result.
+//
+// c) Gather information about the method that will be called.
+//
+// d) Here we gather information from the method signature which tells which args are
+// ByRef and various other flags. We will use this information in later passes.
+//
+// e) Using the information in pass (c), for each argument: box arguments, placing newly
+// boxed items into pObjectRefArray immediately after creating them.
+//
+// f) Pre-allocate any object for a returned value.
+//
+// g) Using the information in pass (c), all arguments are copied into a scratch buffer before
+// invoking the managed function.
+//
+// h) pArguments is loaded from the pre-allocated return object, the individual elements
+// of the other 3 arrays, and from any non-ByRef literals. This is the complete stack
+// to be passed to the managed function. For performance increase, it can remove any
+// overly aggressive items that were placed in pMaybeInteriorPtrArray.
+//
+
+//
+// IsElementTypeSpecial()
+//
+// This is a simple function used to check if a CorElementType needs special handling for func eval.
+//
+// parameters: type - the CorElementType which we need to check
+//
+// return value: true if the specified type needs special handling
+//
+inline static bool IsElementTypeSpecial(CorElementType type)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return ((type == ELEMENT_TYPE_CLASS) ||
+ (type == ELEMENT_TYPE_OBJECT) ||
+ (type == ELEMENT_TYPE_ARRAY) ||
+ (type == ELEMENT_TYPE_SZARRAY) ||
+ (type == ELEMENT_TYPE_STRING));
+}
+
+//
+// GetAndSetLiteralValue()
+//
+// This helper function extracts the value out of the source pointer while taking into account alignment and size.
+// Then it stores the value into the destination pointer, again taking into account alignment and size.
+//
+// parameters: pDst - destination pointer
+// dstType - the CorElementType of the destination value
+// pSrc - source pointer
+// srcType - the CorElementType of the source value
+//
+// return value: none
+//
+inline static void GetAndSetLiteralValue(LPVOID pDst, CorElementType dstType, LPVOID pSrc, CorElementType srcType)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ UINT64 srcValue;
+
+ // Retrieve the value using the source CorElementType.
+ switch (g_pEEInterface->GetSizeForCorElementType(srcType))
+ {
+ case 1:
+ srcValue = (UINT64)*((BYTE*)pSrc);
+ break;
+ case 2:
+ srcValue = (UINT64)*((USHORT*)pSrc);
+ break;
+ case 4:
+ srcValue = (UINT64)*((UINT32*)pSrc);
+ break;
+ case 8:
+ srcValue = (UINT64)*((UINT64*)pSrc);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+ // Cast to the appropriate type using the destination CorElementType.
+ switch (dstType)
+ {
+ case ELEMENT_TYPE_BOOLEAN:
+ *(BYTE*)pDst = (BYTE)!!srcValue;
+ break;
+ case ELEMENT_TYPE_I1:
+ *(INT8*)pDst = (INT8)srcValue;
+ break;
+ case ELEMENT_TYPE_U1:
+ *(UINT8*)pDst = (UINT8)srcValue;
+ break;
+ case ELEMENT_TYPE_I2:
+ *(INT16*)pDst = (INT16)srcValue;
+ break;
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_CHAR:
+ *(UINT16*)pDst = (UINT16)srcValue;
+ break;
+#if !defined(_WIN64)
+ case ELEMENT_TYPE_I:
+#endif
+ case ELEMENT_TYPE_I4:
+ *(int*)pDst = (int)srcValue;
+ break;
+#if !defined(_WIN64)
+ case ELEMENT_TYPE_U:
+#endif
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_R4:
+ *(unsigned*)pDst = (unsigned)srcValue;
+ break;
+#if defined(_WIN64)
+ case ELEMENT_TYPE_I:
+#endif
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_R8:
+ *(INT64*)pDst = (INT64)srcValue;
+ break;
+
+#if defined(_WIN64)
+ case ELEMENT_TYPE_U:
+#endif
+ case ELEMENT_TYPE_U8:
+ *(UINT64*)pDst = (UINT64)srcValue;
+ break;
+ case ELEMENT_TYPE_FNPTR:
+ case ELEMENT_TYPE_PTR:
+ *(void **)pDst = (void *)(SIZE_T)srcValue;
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+}
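+
+// Illustrative use only (the locals below are hypothetical): widening a 16-bit
+// source into a pointer-sized destination slot, as happens when a byref result
+// is written back through a raw destination pointer:
+//
+//   UINT16 src = 0x1234;
+//   SIZE_T dst = 0;
+//   GetAndSetLiteralValue(&dst, ELEMENT_TYPE_U, &src, ELEMENT_TYPE_U2);
+//   // dst now holds 0x1234, zero-extended to the native word size.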
+
+
+//
+// Throw on not supported func evals
+//
+static void ValidateFuncEvalReturnType(DebuggerIPCE_FuncEvalType evalType, MethodTable * pMT)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ if (pMT == g_pStringClass)
+ {
+ if (evalType == DB_IPCE_FET_NEW_OBJECT || evalType == DB_IPCE_FET_NEW_OBJECT_NC)
+ {
+ // Cannot call New object on String constructor.
+ COMPlusThrow(kArgumentException,W("Argument_CannotCreateString"));
+ }
+ }
+ else if (g_pEEInterface->IsTypedReference(pMT))
+ {
+ // Cannot create typed references through funceval.
+ if (evalType == DB_IPCE_FET_NEW_OBJECT || evalType == DB_IPCE_FET_NEW_OBJECT_NC || evalType == DB_IPCE_FET_NORMAL)
+ {
+ COMPlusThrow(kArgumentException, W("Argument_CannotCreateTypedReference"));
+ }
+ }
+}
+
+//
+// Given a register, return the value.
+//
+static SIZE_T GetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *regAddr, SIZE_T regValue)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ SIZE_T ret = 0;
+
+ // Check whether the register address is the marker value for a register in a non-leaf frame.
+ // This is related to the funceval breaking change.
+ //
+ if (regAddr == CORDB_ADDRESS_TO_PTR(kNonLeafFrameRegAddr))
+ {
+ ret = regValue;
+ }
+ else
+ {
+ switch (reg)
+ {
+ case REGISTER_STACK_POINTER:
+ ret = (SIZE_T)GetSP(&pDE->m_context);
+ break;
+
+ case REGISTER_FRAME_POINTER:
+ ret = (SIZE_T)GetFP(&pDE->m_context);
+ break;
+
+#if defined(_TARGET_X86_)
+ case REGISTER_X86_EAX:
+ ret = pDE->m_context.Eax;
+ break;
+
+ case REGISTER_X86_ECX:
+ ret = pDE->m_context.Ecx;
+ break;
+
+ case REGISTER_X86_EDX:
+ ret = pDE->m_context.Edx;
+ break;
+
+ case REGISTER_X86_EBX:
+ ret = pDE->m_context.Ebx;
+ break;
+
+ case REGISTER_X86_ESI:
+ ret = pDE->m_context.Esi;
+ break;
+
+ case REGISTER_X86_EDI:
+ ret = pDE->m_context.Edi;
+ break;
+
+#elif defined(_TARGET_AMD64_)
+ case REGISTER_AMD64_RAX:
+ ret = pDE->m_context.Rax;
+ break;
+
+ case REGISTER_AMD64_RCX:
+ ret = pDE->m_context.Rcx;
+ break;
+
+ case REGISTER_AMD64_RDX:
+ ret = pDE->m_context.Rdx;
+ break;
+
+ case REGISTER_AMD64_RBX:
+ ret = pDE->m_context.Rbx;
+ break;
+
+ case REGISTER_AMD64_RSI:
+ ret = pDE->m_context.Rsi;
+ break;
+
+ case REGISTER_AMD64_RDI:
+ ret = pDE->m_context.Rdi;
+ break;
+
+ case REGISTER_AMD64_R8:
+ ret = pDE->m_context.R8;
+ break;
+
+ case REGISTER_AMD64_R9:
+ ret = pDE->m_context.R9;
+ break;
+
+ case REGISTER_AMD64_R10:
+ ret = pDE->m_context.R10;
+ break;
+
+ case REGISTER_AMD64_R11:
+ ret = pDE->m_context.R11;
+ break;
+
+ case REGISTER_AMD64_R12:
+ ret = pDE->m_context.R12;
+ break;
+
+ case REGISTER_AMD64_R13:
+ ret = pDE->m_context.R13;
+ break;
+
+ case REGISTER_AMD64_R14:
+ ret = pDE->m_context.R14;
+ break;
+
+ case REGISTER_AMD64_R15:
+ ret = pDE->m_context.R15;
+ break;
+
+ // fall through
+ case REGISTER_AMD64_XMM0:
+ case REGISTER_AMD64_XMM1:
+ case REGISTER_AMD64_XMM2:
+ case REGISTER_AMD64_XMM3:
+ case REGISTER_AMD64_XMM4:
+ case REGISTER_AMD64_XMM5:
+ case REGISTER_AMD64_XMM6:
+ case REGISTER_AMD64_XMM7:
+ case REGISTER_AMD64_XMM8:
+ case REGISTER_AMD64_XMM9:
+ case REGISTER_AMD64_XMM10:
+ case REGISTER_AMD64_XMM11:
+ case REGISTER_AMD64_XMM12:
+ case REGISTER_AMD64_XMM13:
+ case REGISTER_AMD64_XMM14:
+ case REGISTER_AMD64_XMM15:
+ ret = FPSpillToR8(&(pDE->m_context.Xmm0) + (reg - REGISTER_AMD64_XMM0));
+ break;
+
+#endif // !_TARGET_X86_ && !_TARGET_AMD64_
+ default:
+ _ASSERT(!"Invalid register number!");
+
+ }
+ }
+
+ return ret;
+}
+
+//
+// Given a register, set its value.
+//
+static void SetRegisterValue(DebuggerEval *pDE, CorDebugRegister reg, void *regAddr, SIZE_T newValue)
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // Check whether the register address is the marker value for a register in a non-leaf frame.
+ // If so, then we can't update the register. Throw an exception to communicate this error.
+ if (regAddr == CORDB_ADDRESS_TO_PTR(kNonLeafFrameRegAddr))
+ {
+ COMPlusThrowHR(CORDBG_E_FUNC_EVAL_CANNOT_UPDATE_REGISTER_IN_NONLEAF_FRAME);
+ return;
+ }
+ else
+ {
+ switch (reg)
+ {
+ case REGISTER_STACK_POINTER:
+ SetSP(&pDE->m_context, newValue);
+ break;
+
+ case REGISTER_FRAME_POINTER:
+ SetFP(&pDE->m_context, newValue);
+ break;
+
+#ifdef _TARGET_X86_
+ case REGISTER_X86_EAX:
+ pDE->m_context.Eax = newValue;
+ break;
+
+ case REGISTER_X86_ECX:
+ pDE->m_context.Ecx = newValue;
+ break;
+
+ case REGISTER_X86_EDX:
+ pDE->m_context.Edx = newValue;
+ break;
+
+ case REGISTER_X86_EBX:
+ pDE->m_context.Ebx = newValue;
+ break;
+
+ case REGISTER_X86_ESI:
+ pDE->m_context.Esi = newValue;
+ break;
+
+ case REGISTER_X86_EDI:
+ pDE->m_context.Edi = newValue;
+ break;
+
+#elif defined(_TARGET_AMD64_)
+ case REGISTER_AMD64_RAX:
+ pDE->m_context.Rax = newValue;
+ break;
+
+ case REGISTER_AMD64_RCX:
+ pDE->m_context.Rcx = newValue;
+ break;
+
+ case REGISTER_AMD64_RDX:
+ pDE->m_context.Rdx = newValue;
+ break;
+
+ case REGISTER_AMD64_RBX:
+ pDE->m_context.Rbx = newValue;
+ break;
+
+ case REGISTER_AMD64_RSI:
+ pDE->m_context.Rsi = newValue;
+ break;
+
+ case REGISTER_AMD64_RDI:
+ pDE->m_context.Rdi = newValue;
+ break;
+
+ case REGISTER_AMD64_R8:
+ pDE->m_context.R8 = newValue;
+ break;
+
+ case REGISTER_AMD64_R9:
+ pDE->m_context.R9 = newValue;
+ break;
+
+ case REGISTER_AMD64_R10:
+ pDE->m_context.R10 = newValue;
+ break;
+
+ case REGISTER_AMD64_R11:
+ pDE->m_context.R11 = newValue;
+ break;
+
+ case REGISTER_AMD64_R12:
+ pDE->m_context.R12 = newValue;
+ break;
+
+ case REGISTER_AMD64_R13:
+ pDE->m_context.R13 = newValue;
+ break;
+
+ case REGISTER_AMD64_R14:
+ pDE->m_context.R14 = newValue;
+ break;
+
+ case REGISTER_AMD64_R15:
+ pDE->m_context.R15 = newValue;
+ break;
+
+ // fall through
+ case REGISTER_AMD64_XMM0:
+ case REGISTER_AMD64_XMM1:
+ case REGISTER_AMD64_XMM2:
+ case REGISTER_AMD64_XMM3:
+ case REGISTER_AMD64_XMM4:
+ case REGISTER_AMD64_XMM5:
+ case REGISTER_AMD64_XMM6:
+ case REGISTER_AMD64_XMM7:
+ case REGISTER_AMD64_XMM8:
+ case REGISTER_AMD64_XMM9:
+ case REGISTER_AMD64_XMM10:
+ case REGISTER_AMD64_XMM11:
+ case REGISTER_AMD64_XMM12:
+ case REGISTER_AMD64_XMM13:
+ case REGISTER_AMD64_XMM14:
+ case REGISTER_AMD64_XMM15:
+ R8ToFPSpill(&(pDE->m_context.Xmm0) + (reg - REGISTER_AMD64_XMM0), newValue);
+ break;
+
+#endif // !_TARGET_X86_ && !_TARGET_AMD64_
+ default:
+ _ASSERT(!"Invalid register number!");
+
+ }
+ }
+}
+
+
+/*
+ * GetRegisterValueAndReturnAddress
+ *
+ * This routine takes out a value from a register, or set of registers, into one of
+ * the given buffers (depending on size), and returns a pointer to the filled in
+ * buffer, or NULL on error.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * pFEAD - Information about this particular argument.
+ * pInt64Buf - pointer to a buffer of type INT64
+ * pSizeTBuf - pointer to a buffer of native size type.
+ *
+ * Returns:
+ * pointer to the filled in buffer, else NULL on error.
+ *
+ */
+static PVOID GetRegisterValueAndReturnAddress(DebuggerEval *pDE,
+ DebuggerIPCE_FuncEvalArgData *pFEAD,
+ INT64 *pInt64Buf,
+ SIZE_T *pSizeTBuf
+ )
+{
+ LIMITED_METHOD_CONTRACT;
+
+ PVOID pAddr;
+
+#if !defined(_WIN64)
+ pAddr = pInt64Buf;
+ DWORD *pLow = (DWORD*)(pInt64Buf);
+ DWORD *pHigh = pLow + 1;
+#endif // _WIN64
+
+ switch (pFEAD->argHome.kind)
+ {
+#if !defined(_WIN64)
+ case RAK_REGREG:
+ *pLow = GetRegisterValue(pDE, pFEAD->argHome.u.reg2, pFEAD->argHome.u.reg2Addr, pFEAD->argHome.u.reg2Value);
+ *pHigh = GetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+ break;
+
+ case RAK_MEMREG:
+ *pLow = GetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+ *pHigh = *((DWORD*)CORDB_ADDRESS_TO_PTR(pFEAD->argHome.addr));
+ break;
+
+ case RAK_REGMEM:
+ *pLow = *((DWORD*)CORDB_ADDRESS_TO_PTR(pFEAD->argHome.addr));
+ *pHigh = GetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+ break;
+#endif // _WIN64
+
+ case RAK_REG:
+ // Simply grab the value out of the proper register.
+ *pSizeTBuf = GetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+ pAddr = pSizeTBuf;
+ break;
+
+ default:
+ pAddr = NULL;
+ break;
+ }
+
+ return pAddr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Clean up any temporary value class variables we have allocated for the funceval.
+//
+// Arguments:
+// ppProtectedValueClasses - list whose elements track the location and type of the temporary variables
+//
+
+void CleanUpTemporaryVariables(ValueClassInfo ** ppProtectedValueClasses)
+{
+ while (*ppProtectedValueClasses != NULL)
+ {
+ ValueClassInfo * pValueClassInfo = *ppProtectedValueClasses;
+ *ppProtectedValueClasses = pValueClassInfo->pNext;
+
+ DeleteInteropSafe(reinterpret_cast<BYTE *>(pValueClassInfo));
+ }
+}
+
+
+#ifdef _DEBUG
+
+//
+// Create a parallel array that tracks that we have initialized information in
+// each array.
+//
+#define MAX_DATA_LOCATIONS_TRACKED 100
+
+typedef DWORD DataLocation;
+
+#define DL_NonExistent 0x00
+#define DL_ObjectRefArray 0x01
+#define DL_MaybeInteriorPtrArray 0x02
+#define DL_BufferForArgsArray 0x04
+#define DL_All 0xFF
+
+#endif // _DEBUG
+
+
+/*
+ * GetFuncEvalArgValue
+ *
+ * This routine is used to fill the pArgument array with the appropriate value. This function
+ * uses the three parallel array entries given, and places the correct value, or reference to
+ * the value in pArgument.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * pFEAD - Information about this particular argument.
+ * isByRef - Is the argument being passed ByRef.
+ * fNeedBoxOrUnbox - Did the argument need boxing or unboxing.
+ * argTH - The type handle for the argument.
+ * byrefArgSigType - The signature type of a parameter that isByRef == true.
+ * pArgument - Location to place the reference or value.
+ * pMaybeInteriorPtrArg - A pointer that contains a value that may be a pointer to
+ * the interior of a managed object.
+ * pObjectRefArg - A pointer that contains an object ref. It was built previously.
+ * pBufferArg - A pointer for holding stuff that did not need to be protected.
+ *
+ * Returns:
+ * None.
+ *
+ */
+static void GetFuncEvalArgValue(DebuggerEval *pDE,
+ DebuggerIPCE_FuncEvalArgData *pFEAD,
+ bool isByRef,
+ bool fNeedBoxOrUnbox,
+ TypeHandle argTH,
+ CorElementType byrefArgSigType,
+ TypeHandle byrefArgTH,
+ ARG_SLOT *pArgument,
+ void *pMaybeInteriorPtrArg,
+ OBJECTREF *pObjectRefArg,
+ INT64 *pBufferArg,
+ ValueClassInfo ** ppProtectedValueClasses,
+ CorElementType argSigType
+ DEBUG_ARG(DataLocation dataLocation)
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE((dataLocation != DL_NonExistent) ||
+ (pFEAD->argElementType == ELEMENT_TYPE_VALUETYPE));
+
+ switch (pFEAD->argElementType)
+ {
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R8:
+ {
+ INT64 *pSource;
+
+#if defined(_WIN64)
+ _ASSERTE(dataLocation & DL_MaybeInteriorPtrArray);
+
+ pSource = (INT64 *)pMaybeInteriorPtrArg;
+#else // !_WIN64
+ _ASSERTE(dataLocation & DL_BufferForArgsArray);
+
+ pSource = pBufferArg;
+#endif // !_WIN64
+
+ if (!isByRef)
+ {
+ *((INT64*)pArgument) = *pSource;
+ }
+ else
+ {
+ *pArgument = PtrToArgSlot(pSource);
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ SIZE_T v = 0;
+ LPVOID pAddr = NULL;
+ INT64 bigVal = 0;
+
+ if (pFEAD->argAddr != NULL)
+ {
+ pAddr = *((void **)pMaybeInteriorPtrArg);
+ }
+ else
+ {
+ pAddr = GetRegisterValueAndReturnAddress(pDE, pFEAD, &bigVal, &v);
+
+ if (pAddr == NULL)
+ {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+ }
+ }
+
+
+ _ASSERTE(pAddr);
+
+ if (!fNeedBoxOrUnbox && !isByRef)
+ {
+ _ASSERTE(argTH.GetMethodTable());
+
+ unsigned size = argTH.GetMethodTable()->GetNumInstanceFieldBytes();
+ if (size <= sizeof(ARG_SLOT)
+#if defined(_TARGET_AMD64_)
+ // On AMD64, value types whose size is not a power of 2 are passed by ref.
+ && ((size & (size-1)) == 0)
+#endif // _TARGET_AMD64_
+ )
+ {
+ memcpyNoGCRefs(ArgSlotEndianessFixup(pArgument, sizeof(LPVOID)), pAddr, size);
+ }
+ else
+ {
+ _ASSERTE(pFEAD->argAddr != NULL);
+#if defined(ENREGISTERED_PARAMTYPE_MAXSIZE)
+ if (ArgIterator::IsArgPassedByRef(argTH))
+ {
+ // On X64, by-value value class arguments which are bigger than 8 bytes are passed by reference
+ // according to the native calling convention. The same goes for value class arguments whose size
+ // is smaller than 8 bytes but not a power of 2. To avoid side effects, we need to allocate a
+ // temporary variable and pass that by reference instead. On ARM64, by-value value class
+ // arguments which are bigger than 16 bytes are passed by reference.
+ _ASSERTE(ppProtectedValueClasses != NULL);
+
+ BYTE * pTemp = new (interopsafe) BYTE[ALIGN_UP(sizeof(ValueClassInfo), 8) + size];
+
+ ValueClassInfo * pValueClassInfo = (ValueClassInfo *)pTemp;
+ LPVOID pData = pTemp + ALIGN_UP(sizeof(ValueClassInfo), 8);
+
+ memcpyNoGCRefs(pData, pAddr, size);
+ *pArgument = PtrToArgSlot(pData);
+
+ pValueClassInfo->pData = pData;
+ pValueClassInfo->pMT = argTH.GetMethodTable();
+
+ pValueClassInfo->pNext = *ppProtectedValueClasses;
+ *ppProtectedValueClasses = pValueClassInfo;
+ }
+ else
+#endif // ENREGISTERED_PARAMTYPE_MAXSIZE
+ *pArgument = PtrToArgSlot(pAddr);
+
+ }
+ }
+ else
+ {
+ if (fNeedBoxOrUnbox)
+ {
+ *pArgument = ObjToArgSlot(*pObjectRefArg);
+ }
+ else
+ {
+ if (pFEAD->argAddr)
+ {
+ *pArgument = PtrToArgSlot(pAddr);
+ }
+ else
+ {
+ // The argument is the address of where we're holding the primitive in the PrimitiveArg array. We
+ // stick the real value from the register into the PrimitiveArg array. It should be in a single
+ // register since it is pointer-sized.
+ _ASSERTE( pFEAD->argHome.kind == RAK_REG );
+ *pArgument = PtrToArgSlot(pBufferArg);
+ *pBufferArg = (INT64)v;
+ }
+ }
+ }
+ }
+ break;
+
+ default:
+ // literal values smaller than 8 bytes and "special types" (e.g. object, string, etc.)
+
+ {
+ INT64 *pSource;
+
+ INDEBUG(DataLocation expectedLocation);
+
+#ifdef _TARGET_X86_
+ if ((pFEAD->argElementType == ELEMENT_TYPE_I4) ||
+ (pFEAD->argElementType == ELEMENT_TYPE_U4) ||
+ (pFEAD->argElementType == ELEMENT_TYPE_R4))
+ {
+ INDEBUG(expectedLocation = DL_MaybeInteriorPtrArray);
+
+ pSource = (INT64 *)pMaybeInteriorPtrArg;
+ }
+ else
+#endif
+ if (IsElementTypeSpecial(pFEAD->argElementType))
+ {
+ INDEBUG(expectedLocation = DL_ObjectRefArray);
+
+ pSource = (INT64 *)pObjectRefArg;
+ }
+ else
+ {
+ INDEBUG(expectedLocation = DL_BufferForArgsArray);
+
+ pSource = pBufferArg;
+ }
+
+ if (pFEAD->argAddr != NULL)
+ {
+ if (!isByRef)
+ {
+ if (pFEAD->argIsHandleValue)
+ {
+ _ASSERTE(dataLocation & DL_BufferForArgsArray);
+
+ OBJECTHANDLE oh = *((OBJECTHANDLE*)(pBufferArg)); // Always comes from buffer
+ *pArgument = PtrToArgSlot(g_pEEInterface->GetObjectFromHandle(oh));
+ }
+ else
+ {
+ _ASSERTE(dataLocation & expectedLocation);
+
+ if (pSource != NULL)
+ {
+ *pArgument = *pSource; // may come from either array.
+ }
+ else
+ {
+ *pArgument = NULL;
+ }
+ }
+ }
+ else
+ {
+ if (pFEAD->argIsHandleValue)
+ {
+ _ASSERTE(dataLocation & DL_BufferForArgsArray);
+
+ *pArgument = *pBufferArg; // Buffer contains the object handle, in this case, so
+ // just copy that across.
+ }
+ else
+ {
+ _ASSERTE(dataLocation & expectedLocation);
+
+ *pArgument = PtrToArgSlot(pSource); // Load the argument with the address of our buffer.
+ }
+ }
+ }
+ else if (pFEAD->argIsLiteral)
+ {
+ _ASSERTE(dataLocation & expectedLocation);
+
+ if (!isByRef)
+ {
+ if (pSource != NULL)
+ {
+ *pArgument = *pSource; // may come from either array.
+ }
+ else
+ {
+ *pArgument = NULL;
+ }
+ }
+ else
+ {
+ *pArgument = PtrToArgSlot(pSource); // Load the argument with the address of our buffer.
+ }
+ }
+ else
+ {
+ if (!isByRef)
+ {
+ if (pSource != NULL)
+ {
+ *pArgument = *pSource; // may come from either array.
+ }
+ else
+ {
+ *pArgument = NULL;
+ }
+ }
+ else
+ {
+ *pArgument = PtrToArgSlot(pSource); // Load the argument with the address of our buffer.
+ }
+ }
+
+ // If we need to unbox, then unbox the arg now.
+ if (fNeedBoxOrUnbox)
+ {
+ if (!isByRef)
+ {
+ // function expects valuetype, argument received is class or object
+
+ // Take the ObjectRef off the stack.
+ ARG_SLOT oi1 = *pArgument;
+ OBJECTREF o1 = ArgSlotToObj(oi1);
+
+ // For Nullable types, we need a 'true' nullable to pass to the function, and we do this
+ // by passing a boxed nullable that we unbox. We allocated this space earlier; however, we
+ // did not know the data location until just now. Fill it in with the data and use that
+ // to pass to the function.
+
+ if (Nullable::IsNullableType(argTH))
+ {
+ _ASSERTE(*pObjectRefArg != 0);
+ _ASSERTE((*pObjectRefArg)->GetMethodTable() == argTH.GetMethodTable());
+ if (o1 != *pObjectRefArg)
+ {
+ Nullable::UnBoxNoCheck((*pObjectRefArg)->GetData(), o1, (*pObjectRefArg)->GetMethodTable());
+ o1 = *pObjectRefArg;
+ }
+ }
+
+ if (o1 == NULL)
+ {
+ COMPlusThrow(kArgumentException, W("ArgumentNull_Obj"));
+ }
+
+
+ if (!o1->GetMethodTable()->IsValueType())
+ {
+ COMPlusThrow(kArgumentException, W("Argument_BadObjRef"));
+ }
+
+
+ // Unbox the little fella to get a pointer to the raw data.
+ void *pData = o1->GetData();
+
+ // Get its size to make sure it fits in an ARG_SLOT
+ unsigned size = o1->GetMethodTable()->GetNumInstanceFieldBytes();
+
+ if (size <= sizeof(ARG_SLOT))
+ {
+ // Its not ByRef, so we need to copy the value class onto the ARG_SLOT.
+ CopyValueClassUnchecked(ArgSlotEndianessFixup(pArgument, sizeof(LPVOID)), pData, o1->GetMethodTable());
+ }
+ else
+ {
+ // Store pointer to the space in the ARG_SLOT
+ *pArgument = PtrToArgSlot(pData);
+ }
+ }
+ else
+ {
+ // Function expects byref valuetype, argument received is byref class.
+
+ // Grab the ObjectRef off the stack via the pointer on the stack. Note: the stack has a pointer to the
+ // ObjectRef since the arg was specified as byref.
+ OBJECTREF* op1 = (OBJECTREF*)ArgSlotToPtr(*pArgument);
+ if (op1 == NULL)
+ {
+ COMPlusThrow(kArgumentException, W("ArgumentNull_Obj"));
+ }
+ OBJECTREF o1 = *op1;
+
+ // For Nullable types, we need a 'true' nullable to pass to the function, and we do this
+ // by passing a boxed nullable that we unbox. We allocated this space earlier; however, we
+ // did not know the data location until just now. Fill it in with the data and use that
+ // to pass to the function.
+
+ if (Nullable::IsNullableType(byrefArgTH))
+ {
+ _ASSERTE(*pObjectRefArg != 0 && (*pObjectRefArg)->GetMethodTable() == byrefArgTH.GetMethodTable());
+ if (o1 != *pObjectRefArg)
+ {
+ Nullable::UnBoxNoCheck((*pObjectRefArg)->GetData(), o1, (*pObjectRefArg)->GetMethodTable());
+ o1 = *pObjectRefArg;
+ }
+ }
+
+ if (o1 == NULL)
+ {
+ COMPlusThrow(kArgumentException, W("ArgumentNull_Obj"));
+ }
+
+ _ASSERTE(o1->GetMethodTable()->IsValueType());
+
+ // Unbox the little fella to get a pointer to the raw data.
+ void *pData = o1->GetData();
+
+ // If it is ByRef, then we just replace the ObjectRef with a pointer to the data.
+ *pArgument = PtrToArgSlot(pData);
+ }
+ }
+
+ // Validate any objectrefs that are supposed to be on the stack.
+ // <TODO>@TODO: Move this to before the boxing/unboxing above</TODO>
+ if (!fNeedBoxOrUnbox)
+ {
+ Object *objPtr;
+ if (!isByRef)
+ {
+ if (IsElementTypeSpecial(argSigType))
+ {
+ // validate the integrity of the object
+ objPtr = (Object*)ArgSlotToPtr(*pArgument);
+ if (FAILED(ValidateObject(objPtr)))
+ {
+ COMPlusThrow(kArgumentException, W("Argument_BadObjRef"));
+ }
+ }
+ }
+ else
+ {
+ _ASSERTE(argSigType == ELEMENT_TYPE_BYREF);
+ if (IsElementTypeSpecial(byrefArgSigType))
+ {
+ objPtr = *(Object**)(ArgSlotToPtr(*pArgument));
+ if (FAILED(ValidateObject(objPtr)))
+ {
+ COMPlusThrow(kArgumentException, W("Argument_BadObjRef"));
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+static CorDebugRegister GetArgAddrFromReg( DebuggerIPCE_FuncEvalArgData *pFEAD)
+{
+ CorDebugRegister retval = REGISTER_INSTRUCTION_POINTER; // as good a default as any
+#if defined(_WIN64)
+ retval = (pFEAD->argHome.kind == RAK_REG ?
+ pFEAD->argHome.reg1 :
+ (CorDebugRegister)((int)REGISTER_IA64_F0 + pFEAD->argHome.floatIndex));
+#else // !_WIN64
+ retval = pFEAD->argHome.reg1;
+#endif // !_WIN64
+ return retval;
+}
+
+//
+// Given info about a byref argument, retrieve the current value from the pBufferForArgsArray,
+// the pMaybeInteriorPtrArray, the pByRefMaybeInteriorPtrArray, or the pObjectRefArray. Then
+// place it back into the proper register or address.
+//
+// Note that we should never use the argAddr of the DebuggerIPCE_FuncEvalArgData in this function
+// since the address may be an interior GC pointer and may have been moved by the GC. Instead,
+// use the pByRefMaybeInteriorPtrArray.
+//
+static void SetFuncEvalByRefArgValue(DebuggerEval *pDE,
+ DebuggerIPCE_FuncEvalArgData *pFEAD,
+ CorElementType byrefArgSigType,
+ INT64 bufferByRefArg,
+ void *maybeInteriorPtrArg,
+ void *byRefMaybeInteriorPtrArg,
+ OBJECTREF objectRefByRefArg)
+{
+ WRAPPER_NO_CONTRACT;
+
+ switch (pFEAD->argElementType)
+ {
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R8:
+ // 64bit values
+ {
+ INT64 source;
+
+#if defined(_WIN64)
+ source = (INT64)maybeInteriorPtrArg;
+#else // !_WIN64
+ source = bufferByRefArg;
+#endif // !_WIN64
+
+ if (pFEAD->argIsLiteral)
+ {
+ // If this was a literal arg, then copy the updated primitive back into the literal.
+ memcpy(pFEAD->argLiteralData, &source, sizeof(pFEAD->argLiteralData));
+ }
+ else if (pFEAD->argAddr != NULL)
+ {
+ *((INT64 *)byRefMaybeInteriorPtrArg) = source;
+ return;
+ }
+ else
+ {
+#if !defined(_WIN64)
+ // RAK_REG is the only 4 byte type, all others are 8 byte types.
+ _ASSERTE(pFEAD->argHome.kind != RAK_REG);
+
+ SIZE_T *pLow = (SIZE_T*)(&source);
+ SIZE_T *pHigh = pLow + 1;
+
+ switch (pFEAD->argHome.kind)
+ {
+ case RAK_REGREG:
+ SetRegisterValue(pDE, pFEAD->argHome.u.reg2, pFEAD->argHome.u.reg2Addr, *pLow);
+ SetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, *pHigh);
+ break;
+
+ case RAK_MEMREG:
+ SetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, *pLow);
+ *((SIZE_T*)CORDB_ADDRESS_TO_PTR(pFEAD->argHome.addr)) = *pHigh;
+ break;
+
+ case RAK_REGMEM:
+ *((SIZE_T*)CORDB_ADDRESS_TO_PTR(pFEAD->argHome.addr)) = *pLow;
+ SetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, *pHigh);
+ break;
+
+ default:
+ break;
+ }
+#else // _WIN64
+ // The only types we use are RAK_REG and RAK_FLOAT, and both of them can be 4 or 8 bytes.
+ _ASSERTE((pFEAD->argHome.kind == RAK_REG) || (pFEAD->argHome.kind == RAK_FLOAT));
+
+ SetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, source);
+#endif // _WIN64
+ }
+ }
+ break;
+
+ default:
+ // literal values smaller than 8 bytes and "special types" (e.g. object, array, string, etc.)
+ {
+ SIZE_T source;
+
+#ifdef _TARGET_X86_
+ if ((pFEAD->argElementType == ELEMENT_TYPE_I4) ||
+ (pFEAD->argElementType == ELEMENT_TYPE_U4) ||
+ (pFEAD->argElementType == ELEMENT_TYPE_R4))
+ {
+ source = (SIZE_T)maybeInteriorPtrArg;
+ }
+ else
+ {
+#endif
+ source = (SIZE_T)bufferByRefArg;
+#ifdef _TARGET_X86_
+ }
+#endif
+
+ if (pFEAD->argIsLiteral)
+ {
+ // If this was a literal arg, then copy the updated primitive back into the literal.
+ // The literal buffer is a fixed size (8 bytes), but our source may be 4 or 8 bytes
+ // depending on the platform. To prevent reading past the end of the source, we
+ // zero the destination buffer and copy only as many bytes as available.
+ memset( pFEAD->argLiteralData, 0, sizeof(pFEAD->argLiteralData) );
+ if (IsElementTypeSpecial(pFEAD->argElementType))
+ {
+ _ASSERTE( sizeof(pFEAD->argLiteralData) >= sizeof(objectRefByRefArg) );
+ memcpy(pFEAD->argLiteralData, &objectRefByRefArg, sizeof(objectRefByRefArg));
+ }
+ else
+ {
+ _ASSERTE( sizeof(pFEAD->argLiteralData) >= sizeof(source) );
+ memcpy(pFEAD->argLiteralData, &source, sizeof(source));
+ }
+ }
+ else if (pFEAD->argAddr == NULL)
+ {
+ // If the 32bit value is enregistered, copy it back to the proper regs.
+
+ // RAK_REG is the only valid 4 byte type on WIN32. On WIN64, both RAK_REG and RAK_FLOAT can be
+ // 4 bytes or 8 bytes.
+ _ASSERTE((pFEAD->argHome.kind == RAK_REG)
+ WIN64_ONLY(|| (pFEAD->argHome.kind == RAK_FLOAT)));
+
+ CorDebugRegister regNum = GetArgAddrFromReg(pFEAD);
+
+ // Shove the result back into the proper register.
+ if (IsElementTypeSpecial(pFEAD->argElementType))
+ {
+ SetRegisterValue(pDE, regNum, pFEAD->argHome.reg1Addr, (SIZE_T)ObjToArgSlot(objectRefByRefArg));
+ }
+ else
+ {
+ SetRegisterValue(pDE, regNum, pFEAD->argHome.reg1Addr, (SIZE_T)source);
+ }
+ }
+ else
+ {
+ // If the result was an object by ref, then copy back the new location of the object (in GC case).
+ if (pFEAD->argIsHandleValue)
+ {
+ // do nothing. The Handle was passed in the pArgument array directly
+ }
+ else if (IsElementTypeSpecial(pFEAD->argElementType))
+ {
+ *((SIZE_T*)byRefMaybeInteriorPtrArg) = (SIZE_T)ObjToArgSlot(objectRefByRefArg);
+ }
+ else if (pFEAD->argElementType == ELEMENT_TYPE_VALUETYPE)
+ {
+ // Do nothing, we passed in the pointer to the valuetype in the pArgument array directly.
+ }
+ else
+ {
+ GetAndSetLiteralValue(byRefMaybeInteriorPtrArg, pFEAD->argElementType, &source, ELEMENT_TYPE_PTR);
+ }
+ }
+ } // end default
+ } // end switch
+}
+
+
+/*
+ * GCProtectAllPassedArgs
+ *
+ * This routine is the first step in doing a func-eval. For a complete overview, see
+ * the comments at the top of this file.
+ *
+ * This routine over-aggressively protects all arguments that may be references to
+ * managed objects. This function cannot crawl the function signature, since doing
+ * so may trigger a GC, and thus, we must assume everything is ByRef.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * pObjectRefArray - An array that contains any object refs. It was built previously.
+ * pMaybeInteriorPtrArray - An array that contains values that may be pointers to
+ * the interior of a managed object.
+ * pBufferForArgsArray - An array for holding stuff that does not need to be protected.
+ * Any handle for the 'this' pointer is put in here for pulling it out later.
+ *
+ * Returns:
+ * None.
+ *
+ */
+static void GCProtectAllPassedArgs(DebuggerEval *pDE,
+ OBJECTREF *pObjectRefArray,
+ void **pMaybeInteriorPtrArray,
+ void **pByRefMaybeInteriorPtrArray,
+ INT64 *pBufferForArgsArray
+ DEBUG_ARG(DataLocation pDataLocationArray[])
+ )
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+
+ DebuggerIPCE_FuncEvalArgData *argData = pDE->GetArgData();
+
+ unsigned currArgIndex = 0;
+
+ //
+ // Gather all the information for the parameters.
+ //
+ for ( ; currArgIndex < pDE->m_argCount; currArgIndex++)
+ {
+ DebuggerIPCE_FuncEvalArgData *pFEAD = &argData[currArgIndex];
+
+ // In case any of the arguments is a by ref argument and points into the GC heap,
+ // we need to GC protect their addresses as well.
+ if (pFEAD->argAddr != NULL)
+ {
+ pByRefMaybeInteriorPtrArray[currArgIndex] = pFEAD->argAddr;
+ }
+
+ switch (pFEAD->argElementType)
+ {
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R8:
+ // 64bit values
+
+#if defined(_WIN64)
+ //
+ // Only need to worry about protecting if a pointer is a 64 bit quantity.
+ //
+ _ASSERTE(sizeof(void *) == sizeof(INT64));
+
+ if (pFEAD->argAddr != NULL)
+ {
+ pMaybeInteriorPtrArray[currArgIndex] = *((void **)(pFEAD->argAddr));
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_MaybeInteriorPtrArray;
+ }
+#endif
+ }
+ else if (pFEAD->argIsLiteral)
+ {
+ _ASSERTE(sizeof(pFEAD->argLiteralData) >= sizeof(void *));
+
+ //
+ // If this is a byref literal arg, then it may be an interior ptr.
+ //
+ void *v = NULL;
+ memcpy(&v, pFEAD->argLiteralData, sizeof(v));
+ pMaybeInteriorPtrArray[currArgIndex] = v;
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_MaybeInteriorPtrArray;
+ }
+#endif
+ }
+ else
+ {
+ _ASSERTE((pFEAD->argHome.kind == RAK_REG) || (pFEAD->argHome.kind == RAK_FLOAT));
+
+
+ CorDebugRegister regNum = GetArgAddrFromReg(pFEAD);
+ SIZE_T v = GetRegisterValue(pDE, regNum, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+ pMaybeInteriorPtrArray[currArgIndex] = (void *)(v);
+
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_MaybeInteriorPtrArray;
+ }
+#endif
+ }
+#endif // _WIN64
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+ //
+ // The value type address could be an interior pointer.
+ //
+ if (pFEAD->argAddr != NULL)
+ {
+ pMaybeInteriorPtrArray[currArgIndex] = ((void **)(pFEAD->argAddr));
+ }
+
+ INDEBUG(pDataLocationArray[currArgIndex] |= DL_MaybeInteriorPtrArray);
+ break;
+
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+
+ if (pFEAD->argAddr != NULL)
+ {
+ if (pFEAD->argIsHandleValue)
+ {
+ OBJECTHANDLE oh = (OBJECTHANDLE)(pFEAD->argAddr);
+ pBufferForArgsArray[currArgIndex] = (INT64)(size_t)oh;
+
+ INDEBUG(pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray);
+ }
+ else
+ {
+ pObjectRefArray[currArgIndex] = *((OBJECTREF *)(pFEAD->argAddr));
+
+ INDEBUG(pDataLocationArray[currArgIndex] |= DL_ObjectRefArray);
+ }
+ }
+ else if (pFEAD->argIsLiteral)
+ {
+ _ASSERTE(sizeof(pFEAD->argLiteralData) >= sizeof(OBJECTREF));
+ OBJECTREF v = NULL;
+ memcpy(&v, pFEAD->argLiteralData, sizeof(v));
+ pObjectRefArray[currArgIndex] = v;
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_ObjectRefArray;
+ }
+#endif
+ }
+ else
+ {
+ // RAK_REG is the only valid pointer-sized type.
+ _ASSERTE(pFEAD->argHome.kind == RAK_REG);
+
+ // Simply grab the value out of the proper register.
+ SIZE_T v = GetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+
+ // The argument is the address.
+ pObjectRefArray[currArgIndex] = (OBJECTREF)v;
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_ObjectRefArray;
+ }
+#endif
+ }
+ break;
+
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_R4:
+ // 32bit values
+
+#ifdef _TARGET_X86_
+ _ASSERTE(sizeof(void *) == sizeof(INT32));
+
+ if (pFEAD->argAddr != NULL)
+ {
+ if (pFEAD->argIsHandleValue)
+ {
+ //
+ // Ignorable - no need to protect
+ //
+ }
+ else
+ {
+ pMaybeInteriorPtrArray[currArgIndex] = *((void **)(pFEAD->argAddr));
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_MaybeInteriorPtrArray;
+ }
+#endif
+ }
+ }
+ else if (pFEAD->argIsLiteral)
+ {
+ _ASSERTE(sizeof(pFEAD->argLiteralData) >= sizeof(INT32));
+
+ //
+ // If this is a byref literal arg, then it may be an interior ptr.
+ //
+ void *v = NULL;
+ memcpy(&v, pFEAD->argLiteralData, sizeof(v));
+ pMaybeInteriorPtrArray[currArgIndex] = v;
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_MaybeInteriorPtrArray;
+ }
+#endif
+ }
+ else
+ {
+ // RAK_REG is the only valid 4 byte type on WIN32.
+ _ASSERTE(pFEAD->argHome.kind == RAK_REG);
+
+ // Simply grab the value out of the proper register.
+ SIZE_T v = GetRegisterValue(pDE, pFEAD->argHome.reg1, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+
+ // The argument is the address.
+ pMaybeInteriorPtrArray[currArgIndex] = (void *)v;
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_MaybeInteriorPtrArray;
+ }
+#endif
+ }
+#endif // _TARGET_X86_
+
+ default:
+ //
+ // Ignorable - no need to protect
+ //
+ break;
+ }
+ }
+}
+
+/*
+ * ResolveFuncEvalGenericArgInfo
+ *
+ * This function pulls out any generic args and makes sure the method is loaded for it.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void ResolveFuncEvalGenericArgInfo(DebuggerEval *pDE)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ }
+ CONTRACTL_END;
+
+ DebuggerIPCE_TypeArgData *firstdata = pDE->GetTypeArgData();
+ unsigned int nGenericArgs = pDE->m_genericArgsCount;
+ SIZE_T cbAllocSize;
+ if ((!ClrSafeInt<SIZE_T>::multiply(nGenericArgs, sizeof(TypeHandle *), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ TypeHandle * pGenericArgs = (nGenericArgs == 0) ? NULL : (TypeHandle *) _alloca(cbAllocSize);
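+ // Note: the overflow-checked multiply above bounds the _alloca size computed from the
+ // generic argument count received from the debugger.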
+
+ //
+ // Snag the type arguments from the input and get the
+ // method desc that corresponds to the instantiated desc.
+ //
+ Debugger::TypeDataWalk walk(firstdata, pDE->m_genericArgsNodeCount);
+ walk.ReadTypeHandles(nGenericArgs, pGenericArgs);
+
+ // <TODO>better error message</TODO>
+ if (!walk.Finished())
+ {
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericArg"));
+ }
+
+ // Find the proper MethodDesc that we need to call.
+ // Since we're already in the target domain, it can't be unloaded so it's safe to
+ // use domain specific structures like the Module*.
+ _ASSERTE( GetAppDomain() == pDE->m_debuggerModule->GetAppDomain() );
+ pDE->m_md = g_pEEInterface->LoadMethodDef(pDE->m_debuggerModule->GetRuntimeModule(),
+ pDE->m_methodToken,
+ nGenericArgs,
+ pGenericArgs,
+ &(pDE->m_ownerTypeHandle));
+
+
+ // We better have a MethodDesc at this point.
+ _ASSERTE(pDE->m_md != NULL);
+
+ IMDInternalImport *pInternalImport = pDE->m_md->GetMDImport();
+ DWORD dwAttr;
+ if (FAILED(pInternalImport->GetMethodDefProps(pDE->m_methodToken, &dwAttr)))
+ {
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericArg"));
+ }
+
+ if (dwAttr & mdRequireSecObject)
+ {
+ // The command window cannot evaluate a function when mdRequireSecObject is turned on, because
+ // doing so expects a security object to be placed in the caller's frame, which we don't have.
+ //
+ COMPlusThrow(kArgumentException,W("Argument_CantCallSecObjFunc"));
+ }
+
+ ValidateFuncEvalReturnType(pDE->m_evalType , pDE->m_md->GetMethodTable());
+
+ // If this is a new object operation, then we should have a .ctor.
+ if ((pDE->m_evalType == DB_IPCE_FET_NEW_OBJECT) && !pDE->m_md->IsCtor())
+ {
+ COMPlusThrow(kArgumentException, W("Argument_MissingDefaultConstructor"));
+ }
+
+ pDE->m_md->EnsureActive();
+
+ // Run the Class Init for this class, if necessary.
+ MethodTable * pOwningMT = pDE->m_ownerTypeHandle.GetMethodTable();
+ pOwningMT->EnsureInstanceActive();
+ pOwningMT->CheckRunClassInitThrowing();
+
+ if (pDE->m_evalType == DB_IPCE_FET_NEW_OBJECT)
+ {
+ // Work out the exact type of the allocated object
+ pDE->m_resultType = (nGenericArgs == 0)
+ ? TypeHandle(pDE->m_md->GetMethodTable())
+ : g_pEEInterface->LoadInstantiation(pDE->m_md->GetModule(), pDE->m_md->GetMethodTable()->GetCl(), nGenericArgs, pGenericArgs);
+ }
+}
+
+
+/*
+ * BoxFuncEvalThisParameter
+ *
+ * This function is a helper for DoNormalFuncEval. It boxes the 'this' parameter if necessary.
+ * For example, when Object.ToString is called on a value class like System.DateTime.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * argData - Array of information about the arguments.
+ * pMaybeInteriorPtrArray - An array that contains values that may be pointers to
+ * the interior of a managed object.
+ * pObjectRef - A GC protected place to put a boxed value, if necessary.
+ *
+ * Returns:
+ * None
+ *
+ */
+void BoxFuncEvalThisParameter(DebuggerEval *pDE,
+ DebuggerIPCE_FuncEvalArgData *argData,
+ void **pMaybeInteriorPtrArray,
+ OBJECTREF *pObjectRefArg // out
+ DEBUG_ARG(DataLocation pDataLocationArray[])
+ )
+{
+ WRAPPER_NO_CONTRACT;
+
+ //
+ // See if we have a value type that is going to be passed as a 'this' pointer.
+ //
+ if ((pDE->m_evalType != DB_IPCE_FET_NEW_OBJECT) &&
+ !pDE->m_md->IsStatic() &&
+ (pDE->m_argCount > 0))
+ {
+ // Allocate the space for boxed nullables. Nullable parameters need an unboxed
+ // nullable value to point at, but our current representation does not have
+ // an unboxed value inside them. Thus we need another buffer to hold it (and
+ // GC-protect it). We use boxed values for this by converting them to 'true'
+ // nullable form, calling the function, and, in the case of byrefs, converting
+ // them back afterward.
+
+ MethodTable* pMT = pDE->m_md->GetMethodTable();
+ if (Nullable::IsNullableType(pMT))
+ {
+ OBJECTREF obj = AllocateObject(pMT);
+ if (*pObjectRefArg != NULL)
+ {
+ BOOL typesMatch = Nullable::UnBox(obj->GetData(), *pObjectRefArg, pMT);
+ (void)typesMatch; //prevent "unused variable" error from GCC
+ _ASSERTE(typesMatch);
+ }
+ *pObjectRefArg = obj;
+ }
+
+ if (argData[0].argElementType == ELEMENT_TYPE_VALUETYPE)
+ {
+ //
+ // See if we need to box up the 'this' parameter.
+ //
+ if (!pDE->m_md->GetMethodTable()->IsValueType())
+ {
+ DebuggerIPCE_FuncEvalArgData *pFEAD = &argData[0];
+ SIZE_T v;
+ LPVOID pAddr = NULL;
+ INT64 bigVal;
+
+ {
+ GCX_FORBID(); //pAddr is unprotected from the time we initialize it
+
+ if (pFEAD->argAddr != NULL)
+ {
+ _ASSERTE(pDataLocationArray[0] & DL_MaybeInteriorPtrArray);
+ pAddr = pMaybeInteriorPtrArray[0];
+ INDEBUG(pDataLocationArray[0] &= ~DL_MaybeInteriorPtrArray);
+ }
+ else
+ {
+
+ pAddr = GetRegisterValueAndReturnAddress(pDE, pFEAD, &bigVal, &v);
+
+ if (pAddr == NULL)
+ {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+ }
+ }
+
+ _ASSERTE(pAddr != NULL);
+ } //GCX_FORBID
+
+ GCPROTECT_BEGININTERIOR(pAddr); //ReadTypeHandle may trigger a GC and move the object that has the value type at pAddr as a field
+
+ //
+ // Grab the class of this value type. If the type is a parameterized
+ // struct type then it may not have yet been loaded by the EE (generics
+ // code sharing may have meant we have never bothered to create the exact
+ // type yet).
+ //
+ // A buffer should have been allocated for the full struct type
+ _ASSERTE(argData[0].fullArgType != NULL);
+ Debugger::TypeDataWalk walk((DebuggerIPCE_TypeArgData *) argData[0].fullArgType, argData[0].fullArgTypeNodeCount);
+
+ TypeHandle typeHandle = walk.ReadTypeHandle();
+
+ if (typeHandle.IsNull())
+ {
+ COMPlusThrow(kArgumentException, W("Argument_BadObjRef"));
+ }
+ //
+ // Box up this value type
+ //
+ *pObjectRefArg = typeHandle.GetMethodTable()->Box(pAddr);
+ if (Nullable::IsNullableType(typeHandle.GetMethodTable()) && (*pObjectRefArg == NULL))
+ {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Obj"));
+ }
+ GCPROTECT_END();
+
+ INDEBUG(pDataLocationArray[0] |= DL_ObjectRefArray);
+ }
+ }
+ }
+}
+
+
+//
+// This is used to store (temporarily) information about the arguments that func-eval
+// will pass. It is used only for the args of the function, not the return buffer nor
+// the 'this' pointer, if there is any of either.
+//
+struct FuncEvalArgInfo
+{
+ CorElementType argSigType;
+ CorElementType byrefArgSigType;
+ TypeHandle byrefArgTypeHandle;
+ bool fNeedBoxOrUnbox;
+ TypeHandle sigTypeHandle;
+};
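+// Note: GatherFuncEvalArgInfo fills in one FuncEvalArgInfo entry per signature argument;
+// BoxFuncEvalArguments, CopyArgsToBuffer, and PackArgumentArray later consult the same
+// entries to decide byref handling and boxing/unboxing.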
+
+
+
+/*
+ * GatherFuncEvalArgInfo
+ *
+ * This function is a helper for DoNormalFuncEval. It gathers together all the information
+ * necessary to process the arguments.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * mSig - The metadata signature of the function to call.
+ * argData - Array of information about the arguments.
+ * pFEArgInfo - An array of structs to hold the argument information.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void GatherFuncEvalArgInfo(DebuggerEval *pDE,
+ MetaSig mSig,
+ DebuggerIPCE_FuncEvalArgData *argData,
+ FuncEvalArgInfo *pFEArgInfo // out
+ )
+{
+ WRAPPER_NO_CONTRACT;
+
+ unsigned currArgIndex = 0;
+
+ if ((pDE->m_evalType == DB_IPCE_FET_NORMAL) && !pDE->m_md->IsStatic())
+ {
+ //
+ // Skip over the 'this' arg, since this function is not supposed to mess with it.
+ //
+ currArgIndex++;
+ }
+
+ //
+ // Gather all the information for the parameters.
+ //
+ for ( ; currArgIndex < pDE->m_argCount; currArgIndex++)
+ {
+ DebuggerIPCE_FuncEvalArgData *pFEAD = &argData[currArgIndex];
+
+ //
+ // Move to the next arg in the signature.
+ //
+ CorElementType argSigType = mSig.NextArgNormalized();
+ _ASSERTE(argSigType != ELEMENT_TYPE_END);
+
+ //
+ // If this arg is a byref arg, then we'll need to know what type we're referencing for later...
+ //
+ TypeHandle byrefTypeHandle = TypeHandle();
+ CorElementType byrefArgSigType = ELEMENT_TYPE_END;
+ if (argSigType == ELEMENT_TYPE_BYREF)
+ {
+ byrefArgSigType = mSig.GetByRefType(&byrefTypeHandle);
+ }
+
+ //
+ // If the sig says class but we've got a value class parameter, then remember that we need to box it. If
+ // the sig says value class, but we've got a boxed value class, then remember that we need to unbox it.
+ //
+ bool fNeedBoxOrUnbox = ((argSigType == ELEMENT_TYPE_CLASS) && (pFEAD->argElementType == ELEMENT_TYPE_VALUETYPE)) ||
+ ((argSigType == ELEMENT_TYPE_VALUETYPE) && ((pFEAD->argElementType == ELEMENT_TYPE_CLASS) || (pFEAD->argElementType == ELEMENT_TYPE_OBJECT)) ||
+ // This is the case when the method signature expects a BYREF ValueType, yet we receive the boxed valuetype's handle.
+ (pFEAD->argElementType == ELEMENT_TYPE_CLASS && argSigType == ELEMENT_TYPE_BYREF && byrefArgSigType == ELEMENT_TYPE_VALUETYPE));
+
+ pFEArgInfo[currArgIndex].argSigType = argSigType;
+ pFEArgInfo[currArgIndex].byrefArgSigType = byrefArgSigType;
+ pFEArgInfo[currArgIndex].byrefArgTypeHandle = byrefTypeHandle;
+ pFEArgInfo[currArgIndex].fNeedBoxOrUnbox = fNeedBoxOrUnbox;
+ pFEArgInfo[currArgIndex].sigTypeHandle = mSig.GetLastTypeHandleThrowing();
+ }
+}
+
+
+/*
+ * BoxFuncEvalArguments
+ *
+ * This function is a helper for DoNormalFuncEval. It boxes all the arguments that
+ * need to be.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * argData - Array of information about the arguments.
+ * pFEArgInfo - An array of structs to hold the argument information.
+ * pMaybeInteriorPtrArray - An array that contains values that may be pointers to
+ * the interior of a managed object.
+ * pObjectRef - A GC protected place to put a boxed value, if necessary.
+ *
+ * Returns:
+ * None
+ *
+ */
+void BoxFuncEvalArguments(DebuggerEval *pDE,
+ DebuggerIPCE_FuncEvalArgData *argData,
+ FuncEvalArgInfo *pFEArgInfo,
+ void **pMaybeInteriorPtrArray,
+ OBJECTREF *pObjectRef // out
+ DEBUG_ARG(DataLocation pDataLocationArray[])
+ )
+{
+ WRAPPER_NO_CONTRACT;
+
+ unsigned currArgIndex = 0;
+
+
+ if ((pDE->m_evalType == DB_IPCE_FET_NORMAL) && !pDE->m_md->IsStatic())
+ {
+ //
+ // Skip over the 'this' arg, since this function is not supposed to mess with it.
+ //
+ currArgIndex++;
+ }
+
+ //
+ // Gather all the information for the parameters.
+ //
+ for ( ; currArgIndex < pDE->m_argCount; currArgIndex++)
+ {
+ DebuggerIPCE_FuncEvalArgData *pFEAD = &argData[currArgIndex];
+
+ // Allocate the space for boxed nullables. Nullable parameters need an unboxed
+ // nullable value to point at, but our current representation does not have
+ // an unboxed value inside them. Thus we need another buffer to hold it (and
+ // GC-protect it). We use boxed values for this by converting them to 'true'
+ // nullable form, calling the function, and, in the case of byrefs, converting
+ // them back afterward.
+
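+ // For byref arguments the nullable check below must be made against the referenced type,
+ // not the byref itself, so pick up the byref's target type handle here.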
+ TypeHandle th = pFEArgInfo[currArgIndex].sigTypeHandle;
+ if (pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_BYREF)
+ th = pFEArgInfo[currArgIndex].byrefArgTypeHandle;
+
+ if (!th.IsNull() && Nullable::IsNullableType(th))
+ {
+
+ OBJECTREF obj = AllocateObject(th.AsMethodTable());
+ if (pObjectRef[currArgIndex] != NULL)
+ {
+ BOOL typesMatch = Nullable::UnBox(obj->GetData(), pObjectRef[currArgIndex], th.AsMethodTable());
+ (void)typesMatch; //prevent "unused variable" error from GCC
+ _ASSERTE(typesMatch);
+ }
+ pObjectRef[currArgIndex] = obj;
+ }
+
+ //
+ // Check if we should box this value now
+ //
+ if ((pFEAD->argElementType == ELEMENT_TYPE_VALUETYPE) &&
+ (pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_BYREF) &&
+ pFEArgInfo[currArgIndex].fNeedBoxOrUnbox)
+ {
+ SIZE_T v;
+ INT64 bigVal;
+ LPVOID pAddr = NULL;
+
+ if (pFEAD->argAddr != NULL)
+ {
+ _ASSERTE(pDataLocationArray[currArgIndex] & DL_MaybeInteriorPtrArray);
+ pAddr = pMaybeInteriorPtrArray[currArgIndex];
+ INDEBUG(pDataLocationArray[currArgIndex] &= ~DL_MaybeInteriorPtrArray);
+ }
+ else
+ {
+
+ pAddr = GetRegisterValueAndReturnAddress(pDE, pFEAD, &bigVal, &v);
+
+ if (pAddr == NULL)
+ {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+ }
+ }
+
+ _ASSERTE(pAddr != NULL);
+
+ MethodTable * pMT = pFEArgInfo[currArgIndex].sigTypeHandle.GetMethodTable();
+
+ //
+ // Stuff the newly boxed item into our GC-protected array.
+ //
+ pObjectRef[currArgIndex] = pMT->Box(pAddr);
+
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_ObjectRefArray;
+ }
+#endif
+ }
+ }
+}
+
+
+/*
+ * GatherFuncEvalMethodInfo
+ *
+ * This function is a helper for DoNormalFuncEval. It gathers together all the information
+ * necessary to process the method
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * mSig - The metadata signature of the function to call.
+ * argData - Array of information about the arguments.
+ * ppUnboxedMD - Returns the resolved method desc if the original is an unboxing stub.
+ * pObjectRefArray - GC protected array of objects passed to this func-eval call.
+ * used to resolve down to the method target for generics.
+ * pBufferForArgsArray - Array of values not needing gc-protection. May hold the
+ * handle for the method target for generics.
+ * pfHasRetBuffArg - TRUE if the function has a return buffer.
+ * pRetValueType - The TypeHandle of the return value.
+ *
+ *
+ * Returns:
+ * None.
+ *
+ */
+void GatherFuncEvalMethodInfo(DebuggerEval *pDE,
+ MetaSig mSig,
+ DebuggerIPCE_FuncEvalArgData *argData,
+ MethodDesc **ppUnboxedMD,
+ OBJECTREF *pObjectRefArray,
+ INT64 *pBufferForArgsArray,
+ BOOL *pfHasRetBuffArg, // out
+ BOOL *pfHasNonStdByValReturn, // out
+ TypeHandle *pRetValueType // out, only if fHasRetBuffArg == true
+ DEBUG_ARG(DataLocation pDataLocationArray[])
+ )
+{
+ WRAPPER_NO_CONTRACT;
+
+ //
+ // If 'this' is a non-static function that points to an unboxing stub, we need to return the
+ // unboxed method desc to really call.
+ //
+ if ((pDE->m_evalType != DB_IPCE_FET_NEW_OBJECT) && !pDE->m_md->IsStatic() && pDE->m_md->IsUnboxingStub())
+ {
+ *ppUnboxedMD = pDE->m_md->GetMethodTable()->GetUnboxedEntryPointMD(pDE->m_md);
+ }
+
+ //
+ // Resolve down to the method on the class of the 'this' parameter.
+ //
+ if ((pDE->m_evalType != DB_IPCE_FET_NEW_OBJECT) && pDE->m_md->IsVtableMethod())
+ {
+ //
+ // Assuming that a constructor can't be an interface method...
+ //
+ _ASSERTE(pDE->m_evalType == DB_IPCE_FET_NORMAL);
+
+ //
+ // We need to go grab the 'this' argument to figure out what class we're headed for...
+ //
+ if (pDE->m_argCount == 0)
+ {
+ COMPlusThrow(kArgumentException, W("Argument_BadObjRef"));
+ }
+
+ //
+ // We should have a valid this pointer.
+ // <TODO>@todo: But the check should cover the register kind as well!</TODO>
+ //
+ if ((argData[0].argHome.kind == RAK_NONE) && (argData[0].argAddr == NULL))
+ {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+ }
+
+ //
+ // Assume we can only have this for real objects or boxed value types, not value classes...
+ //
+ _ASSERTE((argData[0].argElementType == ELEMENT_TYPE_OBJECT) ||
+ (argData[0].argElementType == ELEMENT_TYPE_STRING) ||
+ (argData[0].argElementType == ELEMENT_TYPE_CLASS) ||
+ (argData[0].argElementType == ELEMENT_TYPE_ARRAY) ||
+ (argData[0].argElementType == ELEMENT_TYPE_SZARRAY) ||
+ ((argData[0].argElementType == ELEMENT_TYPE_VALUETYPE) &&
+ (pObjectRefArray[0] != NULL)));
+
+ //
+ // Now get the object pointer to our first arg.
+ //
+ OBJECTREF objRef = NULL;
+ GCPROTECT_BEGIN(objRef);
+
+ if (argData[0].argElementType == ELEMENT_TYPE_VALUETYPE)
+ {
+ //
+ // In this case, we know where it is.
+ //
+ objRef = pObjectRefArray[0];
+ _ASSERTE(pDataLocationArray[0] & DL_ObjectRefArray);
+ }
+ else
+ {
+ TypeHandle dummyTH;
+ ARG_SLOT objSlot;
+
+ //
+ // Take out the first arg. We're gonna trick GetFuncEvalArgValue by passing in just our
+ // object ref as the stack.
+ //
+ // Note that we are passing ELEMENT_TYPE_END in the last parameter because we want to
+ // suppress the valid object ref check.
+ //
+ GetFuncEvalArgValue(pDE,
+ &(argData[0]),
+ false,
+ false,
+ dummyTH,
+ ELEMENT_TYPE_CLASS,
+ dummyTH,
+ &objSlot,
+ NULL,
+ pObjectRefArray,
+ pBufferForArgsArray,
+ NULL,
+ ELEMENT_TYPE_END
+ DEBUG_ARG(pDataLocationArray[0])
+ );
+
+ objRef = ArgSlotToObj(objSlot);
+ }
+
+ //
+ // Validate the object
+ //
+ if (FAILED(ValidateObject(OBJECTREFToObject(objRef))))
+ {
+ COMPlusThrow(kArgumentException, W("Argument_BadObjRef"));
+ }
+
+ //
+ // Null isn't valid in this case!
+ //
+ if (objRef == NULL)
+ {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Obj"));
+ }
+
+ //
+ // Make sure that the object supplied is of a type that can call the method supplied.
+ //
+ if (!g_pEEInterface->ObjIsInstanceOf(OBJECTREFToObject(objRef), pDE->m_ownerTypeHandle))
+ {
+ COMPlusThrow(kArgumentException, W("Argument_CORDBBadMethod"));
+ }
+
+ //
+ // Now, find the proper MethodDesc for this interface method based on the object we're invoking the
+ // method on.
+ //
+ pDE->m_targetCodeAddr = pDE->m_md->GetCallTarget(&objRef, pDE->m_ownerTypeHandle);
+
+ GCPROTECT_END();
+ }
+ else
+ {
+ pDE->m_targetCodeAddr = pDE->m_md->GetCallTarget(NULL, pDE->m_ownerTypeHandle);
+ }
+
+ //
+ // Get the resulting type now. Doing this may trigger a GC or throw.
+ //
+ if (pDE->m_evalType != DB_IPCE_FET_NEW_OBJECT)
+ {
+ pDE->m_resultType = mSig.GetRetTypeHandleThrowing();
+ }
+
+ //
+ // Check if there is an explicit return argument, or if the return type is really a VALUETYPE but our
+ // calling convention is passing it in registers. We just need to remember the return value type
+ // (pRetValueType) so that we will box it properly on our way out.
+ //
+ {
+ ArgIterator argit(&mSig);
+ *pfHasRetBuffArg = argit.HasRetBuffArg();
+ *pfHasNonStdByValReturn = argit.HasNonStandardByvalReturn();
+ }
+
+ CorElementType retType = mSig.GetReturnType();
+ CorElementType retTypeNormalized = mSig.GetReturnTypeNormalized();
+
+
+ if (*pfHasRetBuffArg || *pfHasNonStdByValReturn
+ || ((retType == ELEMENT_TYPE_VALUETYPE) && (retType != retTypeNormalized)))
+ {
+ *pRetValueType = mSig.GetRetTypeHandleThrowing();
+ }
+ else
+ {
+ //
+ // Make sure the caller initialized this value
+ //
+ _ASSERTE((*pRetValueType).IsNull());
+ }
+}
+
+/*
+ * CopyArgsToBuffer
+ *
+ * This routine copies all the arguments into a local scratch buffer so that any argument that
+ * needs to be passed from writable memory can be. Note that this local buffer is NOT GC-protected,
+ * and so the values in the buffer cannot be relied upon across a GC. You *must* use GetFuncEvalArgValue() to load up the
+ * Arguments for the call, because it has the logic to decide which of the parallel arrays to pull
+ * from.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * argData - Array of information about the arguments.
+ * pFEArgInfo - An array of structs to hold the argument information. Must have been previously filled in.
+ * pBufferArray - An array to store values.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void CopyArgsToBuffer(DebuggerEval *pDE,
+ DebuggerIPCE_FuncEvalArgData *argData,
+ FuncEvalArgInfo *pFEArgInfo,
+ INT64 *pBufferArray
+ DEBUG_ARG(DataLocation pDataLocationArray[])
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ unsigned currArgIndex = 0;
+
+
+ if ((pDE->m_evalType == DB_IPCE_FET_NORMAL) && !pDE->m_md->IsStatic())
+ {
+ //
+ // Skip over the 'this' arg, since this function is not supposed to mess with it.
+ //
+ currArgIndex++;
+ }
+
+ //
+ // Spin thru each argument now
+ //
+ for ( ; currArgIndex < pDE->m_argCount; currArgIndex++)
+ {
+ DebuggerIPCE_FuncEvalArgData *pFEAD = &argData[currArgIndex];
+ BOOL isByRef = (pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_BYREF);
+ BOOL fNeedBoxOrUnbox;
+ fNeedBoxOrUnbox = pFEArgInfo[currArgIndex].fNeedBoxOrUnbox;
+
+
+ LOG((LF_CORDB, LL_EVERYTHING, "CATB: currArgIndex=%d\n",
+ currArgIndex));
+ LOG((LF_CORDB, LL_EVERYTHING,
+ "\t: argSigType=0x%x, byrefArgSigType=0x%0x, inType=0x%0x\n",
+ pFEArgInfo[currArgIndex].argSigType,
+ pFEArgInfo[currArgIndex].byrefArgSigType,
+ pFEAD->argElementType));
+
+ INT64 *pDest = &(pBufferArray[currArgIndex]);
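+ // pDest points at this argument's slot in the (non-GC-protected) scratch buffer;
+ // GetFuncEvalArgValue later decides whether to read the argument from here or from
+ // one of the GC-protected parallel arrays.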
+
+ switch (pFEAD->argElementType)
+ {
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R8:
+
+ if (pFEAD->argAddr != NULL)
+ {
+ *pDest = *(INT64*)(pFEAD->argAddr);
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ else if (pFEAD->argIsLiteral)
+ {
+ _ASSERTE(sizeof(pFEAD->argLiteralData) >= sizeof(void *));
+
+ // If this is a literal arg, then we just copy the data.
+ memcpy(pDest, pFEAD->argLiteralData, sizeof(INT64));
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ else
+ {
+
+#if !defined(_WIN64)
+ // RAK_REG is the only 4 byte type, all others are 8 byte types.
+ _ASSERTE(pFEAD->argHome.kind != RAK_REG);
+
+ INT64 bigVal = 0;
+ SIZE_T v;
+ INT64 *pAddr;
+
+ pAddr = (INT64*)GetRegisterValueAndReturnAddress(pDE, pFEAD, &bigVal, &v);
+
+ if (pAddr == NULL)
+ {
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Generic"));
+ }
+
+ *pDest = *pAddr;
+
+#else // _WIN64
+ // Both RAK_REG and RAK_FLOAT can be either 4 bytes or 8 bytes.
+ _ASSERTE((pFEAD->argHome.kind == RAK_REG) || (pFEAD->argHome.kind == RAK_FLOAT));
+
+ CorDebugRegister regNum = GetArgAddrFromReg(pFEAD);
+ *pDest = GetRegisterValue(pDE, regNum, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+#endif // _WIN64
+
+
+
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ break;
+
+ case ELEMENT_TYPE_VALUETYPE:
+
+ //
+ // For value types, we don't do anything here; instead we delay until GetFuncEvalArgValue.
+ //
+ break;
+
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_OBJECT:
+ case ELEMENT_TYPE_STRING:
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+
+ if (pFEAD->argAddr != NULL)
+ {
+ if (!isByRef)
+ {
+ if (pFEAD->argIsHandleValue)
+ {
+ OBJECTHANDLE oh = (OBJECTHANDLE)(pFEAD->argAddr);
+ *pDest = (INT64)(size_t)oh;
+ }
+ else
+ {
+ *pDest = *((SIZE_T*)(pFEAD->argAddr));
+ }
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ else
+ {
+ if (pFEAD->argIsHandleValue)
+ {
+ *pDest = (INT64)(size_t)(pFEAD->argAddr);
+ }
+ else
+ {
+ *pDest = *(SIZE_T*)(pFEAD->argAddr);
+ }
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ }
+ else if (pFEAD->argIsLiteral)
+ {
+ _ASSERTE(sizeof(pFEAD->argLiteralData) >= sizeof(INT64));
+
+ // The called function may expect a larger/smaller value than the literal value.
+ // So we convert the value to the right type.
+
+ CONSISTENCY_CHECK_MSGF(((pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_CLASS) ||
+ (pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_SZARRAY) ||
+ (pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_ARRAY)) ||
+ (isByRef && ((pFEArgInfo[currArgIndex].byrefArgSigType == ELEMENT_TYPE_CLASS) ||
+ (pFEArgInfo[currArgIndex].byrefArgSigType == ELEMENT_TYPE_SZARRAY) ||
+ (pFEArgInfo[currArgIndex].byrefArgSigType == ELEMENT_TYPE_ARRAY))),
+ ("argSigType=0x%0x, byrefArgSigType=0x%0x, isByRef=%d",
+ pFEArgInfo[currArgIndex].argSigType,
+ pFEArgInfo[currArgIndex].byrefArgSigType,
+ isByRef));
+
+ LOG((LF_CORDB, LL_EVERYTHING,
+ "argSigType=0x%0x, byrefArgSigType=0x%0x, isByRef=%d\n",
+ pFEArgInfo[currArgIndex].argSigType, pFEArgInfo[currArgIndex].byrefArgSigType, isByRef));
+
+ *(SIZE_T*)pDest = *(SIZE_T*)pFEAD->argLiteralData;
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ else
+ {
+ // RAK_REG is the only valid 4 byte type on WIN32. On WIN64, RAK_REG and RAK_FLOAT
+ // can both be either 4 bytes or 8 bytes;
+ _ASSERTE((pFEAD->argHome.kind == RAK_REG)
+ WIN64_ONLY(|| (pFEAD->argHome.kind == RAK_FLOAT)));
+
+ CorDebugRegister regNum = GetArgAddrFromReg(pFEAD);
+
+ // Simply grab the value out of the proper register.
+ SIZE_T v = GetRegisterValue(pDE, regNum, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+ *pDest = v;
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ break;
+
+ default:
+ // 4-byte, 2-byte, or 1-byte values
+
+ if (pFEAD->argAddr != NULL)
+ {
+ if (!isByRef)
+ {
+ if (pFEAD->argIsHandleValue)
+ {
+ OBJECTHANDLE oh = (OBJECTHANDLE)(pFEAD->argAddr);
+ *pDest = (INT64)(size_t)oh;
+ }
+ else
+ {
+ GetAndSetLiteralValue(pDest, pFEArgInfo[currArgIndex].argSigType,
+ pFEAD->argAddr, pFEAD->argElementType);
+ }
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ else
+ {
+ if (pFEAD->argIsHandleValue)
+ {
+ *pDest = (INT64)(size_t)(pFEAD->argAddr);
+ }
+ else
+ {
+ // We have to make sure we only grab the correct size of memory from the source. On IA64, we
+ // have to make sure we don't cause misaligned data exceptions as well. Then we put the value
+ // into the pBufferArray. The reason is that we may be passing in some values by ref to a
+ // function that's expecting something of a bigger size. Thus, if we don't do this, then we'll
+ // be bashing memory right next to the source value as the function being called acts upon some
+ // bigger value.
+ GetAndSetLiteralValue(pDest, pFEArgInfo[currArgIndex].byrefArgSigType,
+ pFEAD->argAddr, pFEAD->argElementType);
+ }
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ }
+ else if (pFEAD->argIsLiteral)
+ {
+ _ASSERTE(sizeof(pFEAD->argLiteralData) >= sizeof(INT32));
+
+ // The called function may expect a larger/smaller value than the literal value,
+ // so we convert the value to the right type.
+
+ CONSISTENCY_CHECK_MSGF(
+ ((pFEArgInfo[currArgIndex].argSigType>=ELEMENT_TYPE_BOOLEAN) && (pFEArgInfo[currArgIndex].argSigType<=ELEMENT_TYPE_R8)) ||
+ (pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_PTR) ||
+ (pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_I) ||
+ (pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_U) ||
+ (isByRef && ((pFEArgInfo[currArgIndex].byrefArgSigType>=ELEMENT_TYPE_BOOLEAN) && (pFEArgInfo[currArgIndex].byrefArgSigType<=ELEMENT_TYPE_R8))),
+ ("argSigType=0x%0x, byrefArgSigType=0x%0x, isByRef=%d", pFEArgInfo[currArgIndex].argSigType, pFEArgInfo[currArgIndex].byrefArgSigType, isByRef));
+
+ LOG((LF_CORDB, LL_EVERYTHING,
+ "argSigType=0x%0x, byrefArgSigType=0x%0x, isByRef=%d\n",
+ pFEArgInfo[currArgIndex].argSigType,
+ pFEArgInfo[currArgIndex].byrefArgSigType,
+ isByRef));
+
+ CorElementType relevantType = (isByRef ? pFEArgInfo[currArgIndex].byrefArgSigType : pFEArgInfo[currArgIndex].argSigType);
+
+ GetAndSetLiteralValue(pDest, relevantType, pFEAD->argLiteralData, pFEAD->argElementType);
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ else
+ {
+ // RAK_REG is the only valid 4 byte type on WIN32. On WIN64, RAK_REG and RAK_FLOAT
+ // can both be either 4 bytes or 8 bytes;
+ _ASSERTE((pFEAD->argHome.kind == RAK_REG)
+ WIN64_ONLY(|| (pFEAD->argHome.kind == RAK_FLOAT)));
+
+ CorDebugRegister regNum = GetArgAddrFromReg(pFEAD);
+
+ // Simply grab the value out of the proper register.
+ SIZE_T v = GetRegisterValue(pDE, regNum, pFEAD->argHome.reg1Addr, pFEAD->argHome.reg1Value);
+ *pDest = v;
+#ifdef _DEBUG
+ if (currArgIndex < MAX_DATA_LOCATIONS_TRACKED)
+ {
+ pDataLocationArray[currArgIndex] |= DL_BufferForArgsArray;
+ }
+#endif
+ }
+ }
+ }
+}
+
+
+/*
+ * PackArgumentArray
+ *
+ * This routine fills a given array with the correct values for passing to a managed function.
+ * It uses various component arrays that contain information to correctly create the argument array.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * argData - Array of information about the arguments.
+ * pUnboxedMD - MethodDesc of the function to call, after unboxing.
+ * RetValueType - Type Handle of the return value of the managed function we will call.
+ * pFEArgInfo - An array of structs to hold the argument information. Must have been previously filled in.
+ * pObjectRefArray - An array that contains any object refs. It was built previously.
+ * pMaybeInteriorPtrArray - An array that contains values that may be pointers to
+ * the interior of a managed object.
+ * pBufferForArgsArray - An array that contains values that need writable memory space
+ * for passing ByRef.
+ * newObj - Pre-allocated object for a 'new' call.
+ * pArguments - This array is packed from the above arrays.
+ * ppRetValue - Return value buffer if fRetValueArg is TRUE
+ *
+ * Returns:
+ * None.
+ *
+ */
+void PackArgumentArray(DebuggerEval *pDE,
+ DebuggerIPCE_FuncEvalArgData *argData,
+ FuncEvalArgInfo *pFEArgInfo,
+ MethodDesc *pUnboxedMD,
+ TypeHandle RetValueType,
+ OBJECTREF *pObjectRefArray,
+ void **pMaybeInteriorPtrArray,
+ INT64 *pBufferForArgsArray,
+ ValueClassInfo ** ppProtectedValueClasses,
+ OBJECTREF newObj,
+ BOOL fRetValueArg,
+ ARG_SLOT *pArguments,
+ PVOID * ppRetValue
+ DEBUG_ARG(DataLocation pDataLocationArray[])
+ )
+{
+ WRAPPER_NO_CONTRACT;
+
+ GCX_FORBID();
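+ // From here on, no operation may trigger a GC: the raw argument slots being packed below
+ // are not reported to the GC and would not be updated if objects moved.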
+
+ unsigned currArgIndex = 0;
+ unsigned currArgSlot = 0;
+
+
+ //
+ // THIS POINTER (if any)
+ // For non-static methods, or when returning a new object,
+ // the first arg in the array is 'this' or the new object.
+ //
+ if (pDE->m_evalType == DB_IPCE_FET_NEW_OBJECT)
+ {
+ //
+ // If this is a new object op, then we need to fill in the 0'th
+ // arg slot with the 'this' ptr.
+ //
+ pArguments[0] = ObjToArgSlot(newObj);
+
+ //
+ // If we are invoking a function on a value class, but we have a boxed value class for 'this',
+ // then go ahead and unbox it and leave a ref to the value class on the stack as 'this'.
+ //
+ if (pDE->m_md->GetMethodTable()->IsValueType())
+ {
+ _ASSERTE(newObj->GetMethodTable()->IsValueType());
+
+ // This is one of those places we use true boxed nullables
+ _ASSERTE(!Nullable::IsNullableType(pDE->m_md->GetMethodTable()) ||
+ newObj->GetMethodTable() == pDE->m_md->GetMethodTable());
+ void *pData = newObj->GetData();
+ pArguments[0] = PtrToArgSlot(pData);
+ }
+
+ //
+ // Bump up the arg slot
+ //
+ currArgSlot++;
+ }
+ else if (!pDE->m_md->IsStatic())
+ {
+ //
+ // Place 'this' first in the array for non-static methods.
+ //
+ TypeHandle dummyTH;
+ bool isByRef = false;
+ bool fNeedBoxOrUnbox = false;
+
+ // We had better have an object for a 'this' argument!
+ CorElementType et = argData[0].argElementType;
+
+ if (!(IsElementTypeSpecial(et) ||
+ et == ELEMENT_TYPE_VALUETYPE))
+ {
+ COMPlusThrow(kArgumentOutOfRangeException, W("ArgumentOutOfRange_Enum"));
+ }
+
+ LOG((LF_CORDB, LL_EVERYTHING, "this: currArgSlot=%d, currArgIndex=%d et=0x%x\n", currArgSlot, currArgIndex, et));
+
+ if (pDE->m_md->GetMethodTable()->IsValueType())
+ {
+ // For value classes, the 'this' parameter is always passed by reference.
+ // However do not unbox if we are calling an unboxing stub.
+ if (pDE->m_md == pUnboxedMD)
+ {
+ // pDE->m_md is expecting an unboxed this pointer. Then we will unbox it.
+ isByRef = true;
+
+ // Remember if we need to unbox this parameter, though.
+ if ((et == ELEMENT_TYPE_CLASS) || (et == ELEMENT_TYPE_OBJECT))
+ {
+ fNeedBoxOrUnbox = true;
+ }
+ }
+ }
+ else if (et == ELEMENT_TYPE_VALUETYPE)
+ {
+ // When the method we are invoking is defined on a non-value type and we receive a ValueType as input,
+ // we are calling a method on System.Object. In this case, we need to box the input ValueType.
+ fNeedBoxOrUnbox = true;
+ }
+
+ GetFuncEvalArgValue(pDE,
+ &argData[currArgIndex],
+ isByRef,
+ fNeedBoxOrUnbox,
+ dummyTH,
+ ELEMENT_TYPE_CLASS,
+ pDE->m_md->GetMethodTable(),
+ &(pArguments[currArgSlot]),
+ &(pMaybeInteriorPtrArray[currArgIndex]),
+ &(pObjectRefArray[currArgIndex]),
+ &(pBufferForArgsArray[currArgIndex]),
+ NULL,
+ ELEMENT_TYPE_OBJECT
+ DEBUG_ARG((currArgIndex < MAX_DATA_LOCATIONS_TRACKED) ? pDataLocationArray[currArgIndex]
+ : DL_All)
+ );
+
+ LOG((LF_CORDB, LL_EVERYTHING, "this = 0x%08x\n", ArgSlotToPtr(pArguments[currArgSlot])));
+
+ // We need to check 'this' for a null ref ourselves... NOTE: only do this if we put an object reference on
+ // the stack. If we put a byref for a value type, then we don't need to do this!
+ if (!isByRef)
+ {
+ // The 'this' pointer is not an unboxed value type.
+
+ ARG_SLOT oi1 = pArguments[currArgSlot];
+ OBJECTREF o1 = ArgSlotToObj(oi1);
+
+ if (FAILED(ValidateObject(OBJECTREFToObject(o1))))
+ {
+ COMPlusThrow(kArgumentException, W("Argument_BadObjRef"));
+ }
+
+ if (OBJECTREFToObject(o1) == NULL)
+ {
+ COMPlusThrow(kNullReferenceException, W("NullReference_This"));
+ }
+
+ // For interface method, we have already done the check early on.
+ if (!pDE->m_md->IsInterface())
+ {
+ // We also need to make sure that the method that we are invoking is either defined on this object or the direct/indirect
+ // base objects.
+ Object *objPtr = OBJECTREFToObject(o1);
+ MethodTable *pMT = objPtr->GetMethodTable();
+ // <TODO> Do this check in the following cases as well... </TODO>
+ if (!pMT->IsArray()
+ && !pMT->IsTransparentProxy()
+ && !pDE->m_md->IsSharedByGenericInstantiations())
+ {
+ TypeHandle thFrom = TypeHandle(pMT);
+ TypeHandle thTarget = TypeHandle(pDE->m_md->GetMethodTable());
+ //<TODO> What about MaybeCast?</TODO>
+ if (thFrom.CanCastToNoGC(thTarget) == TypeHandle::CannotCast)
+ {
+ COMPlusThrow(kArgumentException, W("Argument_CORDBBadMethod"));
+ }
+ }
+ }
+ }
+
+ //
+ // Increment up both arrays.
+ //
+ currArgSlot++;
+ currArgIndex++;
+ }
+
+ // Special handling for functions that return value classes.
+ if (fRetValueArg)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "retBuff: currArgSlot=%d, currArgIndex=%d\n", currArgSlot, currArgIndex));
+
+ //
+ // Allocate buffer for return value and GC protect it in case it contains object references
+ //
+ unsigned size = RetValueType.GetMethodTable()->GetNumInstanceFieldBytes();
+
+#ifdef FEATURE_HFA
+ // The buffer for HFAs has to be always ENREGISTERED_RETURNTYPE_MAXSIZE
+ size = max(size, ENREGISTERED_RETURNTYPE_MAXSIZE);
+#endif
+
+ BYTE * pTemp = new (interopsafe) BYTE[ALIGN_UP(sizeof(ValueClassInfo), 8) + size];
+
+ ValueClassInfo * pValueClassInfo = (ValueClassInfo *)pTemp;
+ LPVOID pData = pTemp + ALIGN_UP(sizeof(ValueClassInfo), 8);
+
+ memset(pData, 0, size);
+
+ pValueClassInfo->pData = pData;
+ pValueClassInfo->pMT = RetValueType.GetMethodTable();
+
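+ // Chain this buffer onto the protected value class list so that any object references it
+ // comes to contain are reported to the GC.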
+ pValueClassInfo->pNext = *ppProtectedValueClasses;
+ *ppProtectedValueClasses = pValueClassInfo;
+
+ pArguments[currArgSlot++] = PtrToArgSlot(pData);
+ *ppRetValue = pData;
+ }
+
+ // REAL ARGUMENTS (if any)
+ // Now do the remaining args
+ for ( ; currArgIndex < pDE->m_argCount; currArgSlot++, currArgIndex++)
+ {
+ DebuggerIPCE_FuncEvalArgData *pFEAD = &argData[currArgIndex];
+
+ LOG((LF_CORDB, LL_EVERYTHING, "currArgSlot=%d, currArgIndex=%d\n",
+ currArgSlot,
+ currArgIndex));
+ LOG((LF_CORDB, LL_EVERYTHING,
+ "\t: argSigType=0x%x, byrefArgSigType=0x%0x, inType=0x%0x\n",
+ pFEArgInfo[currArgIndex].argSigType,
+ pFEArgInfo[currArgIndex].byrefArgSigType,
+ pFEAD->argElementType));
+
+
+ GetFuncEvalArgValue(pDE,
+ pFEAD,
+ pFEArgInfo[currArgIndex].argSigType == ELEMENT_TYPE_BYREF,
+ pFEArgInfo[currArgIndex].fNeedBoxOrUnbox,
+ pFEArgInfo[currArgIndex].sigTypeHandle,
+ pFEArgInfo[currArgIndex].byrefArgSigType,
+ pFEArgInfo[currArgIndex].byrefArgTypeHandle,
+ &(pArguments[currArgSlot]),
+ &(pMaybeInteriorPtrArray[currArgIndex]),
+ &(pObjectRefArray[currArgIndex]),
+ &(pBufferForArgsArray[currArgIndex]),
+ ppProtectedValueClasses,
+ pFEArgInfo[currArgIndex].argSigType
+ DEBUG_ARG((currArgIndex < MAX_DATA_LOCATIONS_TRACKED) ? pDataLocationArray[currArgIndex]
+ : DL_All)
+ );
+ }
+}
+
+/*
+ * UnpackFuncEvalResult
+ *
+ * This routine takes the resulting object of a func-eval and does any copying, boxing, or unboxing necessary.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * newObj - Pre-allocated object for NEW_OBJ func-evals.
+ * retObject - Pre-allocated object to be filled in with the info in pRetBuff.
+ * RetValueType - The return type of the function called.
+ * pRetBuff - The raw bytes returned by the func-eval call when there is a return buffer parameter.
+ *
+ *
+ * Returns:
+ * None.
+ *
+ */
+void UnpackFuncEvalResult(DebuggerEval *pDE,
+ OBJECTREF newObj,
+ OBJECTREF retObject,
+ TypeHandle RetValueType,
+ void *pRetBuff
+ )
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+
+ // Ah, but if this was a new object op, then the result is really
+ // the object we allocated above...
+ if (pDE->m_evalType == DB_IPCE_FET_NEW_OBJECT)
+ {
+ // We purposely do not morph nullables to be boxed Ts here because debugger EE's otherwise
+ // have no way of creating true nullables that they need for their own purposes.
+ pDE->m_result = ObjToArgSlot(newObj);
+ pDE->m_retValueBoxing = Debugger::AllBoxed;
+ }
+ else if (!RetValueType.IsNull())
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "FuncEval call is saving a boxed VC return value.\n"));
+
+ //
+ // We pre-created it above
+ //
+ _ASSERTE(retObject != NULL);
+
+ // This is one of those places we use true boxed nullables
+ _ASSERTE(!Nullable::IsNullableType(RetValueType)||
+ retObject->GetMethodTable() == RetValueType.GetMethodTable());
+
+ if (pRetBuff != NULL)
+ {
+ // box the object
+ CopyValueClass(retObject->GetData(),
+ pRetBuff,
+ RetValueType.GetMethodTable(),
+ retObject->GetAppDomain());
+ }
+ else
+ {
+ // Box the primitive returned; retObject is a true nullable for nullables, it will be normalized later.
+ CopyValueClass(retObject->GetData(),
+ &(pDE->m_result),
+ RetValueType.GetMethodTable(),
+ retObject->GetAppDomain());
+ }
+
+ pDE->m_result = ObjToArgSlot(retObject);
+ pDE->m_retValueBoxing = Debugger::AllBoxed;
+ }
+ else
+ {
+ //
+ // Other FuncEvals return primitives as unboxed.
+ //
+ pDE->m_retValueBoxing = Debugger::OnlyPrimitivesUnboxed;
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "FuncEval call has saved the return value.\n"));
+ // No exception, so it worked as far as we're concerned.
+ pDE->m_successful = true;
+
+ // If the result is an object, then place the object
+ // reference into a strong handle and place the handle into the
+ // pDE to protect the result from a collection.
+ CorElementType retClassET = pDE->m_resultType.GetSignatureCorElementType();
+
+ if ((pDE->m_retValueBoxing == Debugger::AllBoxed) ||
+ !RetValueType.IsNull() ||
+ IsElementTypeSpecial(retClassET))
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "Creating strong handle for boxed DoNormalFuncEval result.\n"));
+ OBJECTHANDLE oh = pDE->m_thread->GetDomain()->CreateStrongHandle(ArgSlotToObj(pDE->m_result));
+ pDE->m_result = (INT64)(LONG_PTR)oh;
+ pDE->m_vmObjectHandle = VMPTR_OBJECTHANDLE::MakePtr(oh);
+ }
+}
+
+/*
+ * UnpackFuncEvalArguments
+ *
+ * This routine updates any byref arguments after the func-eval call has returned, writing the
+ * possibly-updated values from the temporary arrays back to the arguments' original homes.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * argData - Array of information about the arguments.
+ * mSig - The metadata signature of the function that was called.
+ * staticMethod - TRUE if the function called is static.
+ * pObjectRefArray - An array that contains any object refs. It was built previously.
+ * pMaybeInteriorPtrArray - An array that contains values that may be pointers to
+ * the interior of a managed object.
+ * pByRefMaybeInteriorPtrArray - An array that contains the addresses of the by ref arguments.
+ * pBufferForArgsArray - An array of scratch values for enregistered and ByRef literal arguments.
+ *
+ *
+ * Returns:
+ * None.
+ *
+ */
+void UnpackFuncEvalArguments(DebuggerEval *pDE,
+ DebuggerIPCE_FuncEvalArgData *argData,
+ MetaSig mSig,
+ BOOL staticMethod,
+ OBJECTREF *pObjectRefArray,
+ void **pMaybeInteriorPtrArray,
+ void **pByRefMaybeInteriorPtrArray,
+ INT64 *pBufferForArgsArray
+ )
+{
+ WRAPPER_NO_CONTRACT;
+
+ // Update any enregistered byrefs with their new values from the
+ // proper byref temporary array.
+ if (pDE->m_argCount > 0)
+ {
+ mSig.Reset();
+
+ unsigned currArgIndex = 0;
+
+ if ((pDE->m_evalType == DB_IPCE_FET_NORMAL) && !pDE->m_md->IsStatic())
+ {
+ //
+ // Skip over the 'this' arg, since this function is not supposed to mess with it.
+ //
+ currArgIndex++;
+ }
+
+ for (; currArgIndex < pDE->m_argCount; currArgIndex++)
+ {
+ CorElementType argSigType = mSig.NextArgNormalized();
+
+ LOG((LF_CORDB, LL_EVERYTHING, "currArgIndex=%d argSigType=0x%x\n", currArgIndex, argSigType));
+
+ _ASSERTE(argSigType != ELEMENT_TYPE_END);
+
+ if (argSigType == ELEMENT_TYPE_BYREF)
+ {
+ TypeHandle byrefClass = TypeHandle();
+ CorElementType byrefArgSigType = mSig.GetByRefType(&byrefClass);
+
+ // If these are the true boxed nullables we created in BoxFuncEvalArguments, convert them back
+ pObjectRefArray[currArgIndex] = Nullable::NormalizeBox(pObjectRefArray[currArgIndex]);
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DoNormalFuncEval: Updating enregistered byref...\n"));
+ SetFuncEvalByRefArgValue(pDE,
+ &argData[currArgIndex],
+ byrefArgSigType,
+ pBufferForArgsArray[currArgIndex],
+ pMaybeInteriorPtrArray[currArgIndex],
+ pByRefMaybeInteriorPtrArray[currArgIndex],
+ pObjectRefArray[currArgIndex]
+ );
+ }
+ }
+ }
+}
+
+
+/*
+ * FuncEvalWrapper
+ *
+ * Helper function for func-eval. We have to split it out so that we can put a __try / __finally in to
+ * notify on a Catch-Handler found.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * pArguments - created stack to pass for the call.
+ * pCatcherStackAddr - stack address to report as the Catch Handler Found location.
+ *
+ * Returns:
+ * None.
+ *
+ */
+void FuncEvalWrapper(MethodDescCallSite* pMDCS, DebuggerEval *pDE, ARG_SLOT *pArguments, BYTE *pCatcherStackAddr)
+{
+ struct Param : NotifyOfCHFFilterWrapperParam
+ {
+ MethodDescCallSite* pMDCS;
+ DebuggerEval *pDE;
+ ARG_SLOT *pArguments;
+ };
+
+ Param param;
+ param.pFrame = pCatcherStackAddr; // Inherited from NotifyOfCHFFilterWrapperParam
+ param.pMDCS = pMDCS;
+ param.pDE = pDE;
+ param.pArguments = pArguments;
+
+ PAL_TRY(Param *, pParam, &param)
+ {
+ pParam->pDE->m_result = pParam->pMDCS->CallWithValueTypes_RetArgSlot(pParam->pArguments);
+ }
+ PAL_EXCEPT_FILTER(NotifyOfCHFFilterWrapper)
+ {
+ // Should never reach here b/c handler should always continue search.
+ _ASSERTE(false);
+ }
+ PAL_ENDTRY
+}
+
+/*
+ * RecordFuncEvalException
+ *
+ * Helper function that records the details of an exception that occurred during a FuncEval.
+ * Note that this should be called from within the target domain of the FuncEval.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed
+ * ppException - the Exception object that was thrown
+ *
+ * Returns:
+ * None.
+ */
+static void RecordFuncEvalException(DebuggerEval *pDE,
+ OBJECTREF ppException )
+{
+ CONTRACTL
+ {
+ THROWS; // CreateStrongHandle could throw OOM
+ GC_NOTRIGGER;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ // We got an exception. Make the exception into our result.
+ pDE->m_successful = false;
+ LOG((LF_CORDB, LL_EVERYTHING, "D::FEHW - Exception during funceval.\n"));
+
+ //
+ // Special handling for thread abort exceptions. We need to explicitly reset the
+ // abort request on the EE thread, then make sure to place this thread on a thunk
+ // that will re-raise the exception when we continue the process. Note: we still
+ // pass this thread abort exception up as the result of the eval.
+ //
+ if (IsExceptionOfType(kThreadAbortException, &ppException))
+ {
+ if (pDE->m_aborting != DebuggerEval::FE_ABORT_NONE)
+ {
+ //
+ // Reset the abort request.
+ //
+ pDE->m_thread->UserResetAbort(Thread::TAR_FuncEval);
+
+ //
+ // This is the abort we sent down.
+ //
+ pDE->m_result = NULL;
+ pDE->m_resultType = TypeHandle();
+ pDE->m_aborted = true;
+ pDE->m_retValueBoxing = Debugger::NoValueTypeBoxing;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "D::FEHW - funceval abort exception.\n"));
+
+ }
+ else
+ {
+ //
+ // This must have come from somewhere else, remember that we need to
+ // rethrow this.
+ //
+ pDE->m_rethrowAbortException = true;
+
+ //
+ // The result is the exception object.
+ //
+ pDE->m_result = ObjToArgSlot(ppException);
+
+ pDE->m_resultType = ppException->GetTypeHandle();
+ OBJECTHANDLE oh = pDE->m_thread->GetDomain()->CreateStrongHandle(ArgSlotToObj(pDE->m_result));
+ pDE->m_result = (INT64)PTR_TO_CORDB_ADDRESS(oh);
+ pDE->m_vmObjectHandle = VMPTR_OBJECTHANDLE::MakePtr(oh);
+ pDE->m_retValueBoxing = Debugger::NoValueTypeBoxing;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "D::FEHW - Non-FE abort thread abort..\n"));
+ }
+ }
+ else
+ {
+
+ //
+ // The result is the exception object.
+ //
+ pDE->m_result = ObjToArgSlot(ppException);
+
+ pDE->m_resultType = ppException->GetTypeHandle();
+ OBJECTHANDLE oh = pDE->m_thread->GetDomain()->CreateStrongHandle(ArgSlotToObj(pDE->m_result));
+ pDE->m_result = (INT64)(LONG_PTR)oh;
+ pDE->m_vmObjectHandle = VMPTR_OBJECTHANDLE::MakePtr(oh);
+
+ pDE->m_retValueBoxing = Debugger::NoValueTypeBoxing;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "D::FEHW - Exception for the user.\n"));
+ }
+}
+
+
+/*
+ * DoNormalFuncEval
+ *
+ * Does the main body of work (steps 1c onward) for the normal func-eval algorithm detailed at the
+ * top of this file. The args have already been GC protected and we've transitioned into the appropriate
+ * domain (steps 1a & 1b). This has to be a separate function from GCProtectArgsAndDoNormalFuncEval
+ * because otherwise we can't reliably find the right GCFrames to pop when unwinding the stack due to
+ * an exception on 64-bit platforms (we have some GCFrames outside of the TRY, and some inside,
+ * and they won't necessarily be laid out sequentially on the stack if they are all in the same function).
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * pCatcherStackAddr - stack address to report as the Catch Handler Found location.
+ * pObjectRefArray - An array to hold object ref args. This array is protected from GC's.
+ * pMaybeInteriorPtrArray - An array to hold values that may be pointers into a managed object.
+ * This array is protected from GCs.
+ * pByRefMaybeInteriorPtrArray - An array to hold values that may be pointers into a managed
+ * object. This array is protected from GCs. This array protects the address of the arguments
+ * while the pMaybeInteriorPtrArray protects the value of the arguments. We need to do this
+ * because of by ref arguments.
+ * pBufferForArgsArray - a buffer of temporary scratch space for things that do not need to be
+ * protected, or are protected for free (e.g. Handles).
+ * pDataLocationArray - an array of tracking data for debug sanity checks
+ *
+ * Returns:
+ * None.
+ */
+static void DoNormalFuncEval( DebuggerEval *pDE,
+ BYTE *pCatcherStackAddr,
+ OBJECTREF *pObjectRefArray,
+ void **pMaybeInteriorPtrArray,
+ void **pByRefMaybeInteriorPtrArray,
+ INT64 *pBufferForArgsArray,
+ ValueClassInfo ** ppProtectedValueClasses
+ DEBUG_ARG(DataLocation pDataLocationArray[])
+ )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ //
+ // Now that all the args are protected, we can go back and deal with generic args and resolving
+ // all their information.
+ //
+ ResolveFuncEvalGenericArgInfo(pDE);
+
+ //
+ // Grab the signature of the method we're working on and do some error checking.
+ // Note that if this is instantiated generic code, then this will
+ // correctly give us an instantiated view of the signature that we can iterate without
+ // worrying about generic items in the signature.
+ //
+ MetaSig mSig(pDE->m_md);
+
+ BYTE callingconvention = mSig.GetCallingConvention();
+ if (!isCallConv(callingconvention, IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ {
+ // We don't support calling vararg!
+ COMPlusThrow(kArgumentException, W("Argument_CORDBBadVarArgCallConv"));
+ }
+
+ //
+ // We'll need to know if this is a static method or not.
+ //
+ BOOL staticMethod = pDE->m_md->IsStatic();
+
+ _ASSERTE((pDE->m_evalType == DB_IPCE_FET_NORMAL) || !staticMethod);
+
+ //
+ // Do Step 1c - Pre-allocate space for new objects.
+ //
+ OBJECTREF newObj = NULL;
+ GCPROTECT_BEGIN(newObj);
+
+ SIZE_T allocArgCnt = 0;
+
+ if (pDE->m_evalType == DB_IPCE_FET_NEW_OBJECT)
+ {
+ ValidateFuncEvalReturnType(DB_IPCE_FET_NEW_OBJECT, pDE->m_resultType.GetMethodTable());
+ pDE->m_resultType.GetMethodTable()->EnsureInstanceActive();
+ newObj = AllocateObject(pDE->m_resultType.GetMethodTable());
+
+ //
+ // Note: we account for an extra argument in the count passed
+ // in. We use this to increase the space allocated for args,
+ // and we use it to control the number of args copied into
+ // those arrays below. Note: m_argCount already includes space
+ // for this.
+ //
+ allocArgCnt = pDE->m_argCount + 1;
+ }
+ else
+ {
+ allocArgCnt = pDE->m_argCount;
+ }
+
+ //
+ // Validate the argument count with mSig.
+ //
+ if (allocArgCnt != (mSig.NumFixedArgs() + (staticMethod ? 0 : 1)))
+ {
+ COMPlusThrow(kTargetParameterCountException, W("Arg_ParmCnt"));
+ }
+
+ //
+ // Do Step 1d - Gather information about the method that will be called.
+ //
+ // An array to hold information about the parameters to be passed. This is
+ // all the information we need to gather before entering the GCX_FORBID area.
+ //
+ DebuggerIPCE_FuncEvalArgData *argData = pDE->GetArgData();
+
+ MethodDesc *pUnboxedMD = pDE->m_md;
+ BOOL fHasRetBuffArg;
+ BOOL fHasNonStdByValReturn;
+ TypeHandle RetValueType;
+
+ BoxFuncEvalThisParameter(pDE,
+ argData,
+ pMaybeInteriorPtrArray,
+ pObjectRefArray
+ DEBUG_ARG(pDataLocationArray)
+ );
+
+ GatherFuncEvalMethodInfo(pDE,
+ mSig,
+ argData,
+ &pUnboxedMD,
+ pObjectRefArray,
+ pBufferForArgsArray,
+ &fHasRetBuffArg,
+ &fHasNonStdByValReturn,
+ &RetValueType
+ DEBUG_ARG(pDataLocationArray)
+ );
+
+ //
+ // Do Step 1e - Gather info from runtime about args (may trigger a GC).
+ //
+ SIZE_T cbAllocSize;
+ if (!(ClrSafeInt<SIZE_T>::multiply(pDE->m_argCount, sizeof(FuncEvalArgInfo), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ FuncEvalArgInfo * pFEArgInfo = (FuncEvalArgInfo *)_alloca(cbAllocSize);
+ memset(pFEArgInfo, 0, cbAllocSize);
+
+ GatherFuncEvalArgInfo(pDE, mSig, argData, pFEArgInfo);
+
+ //
+ // Do Step 1f - Box or unbox arguments one at a time, placing newly boxed items into
+ // pObjectRefArray immediately after creating them.
+ //
+ BoxFuncEvalArguments(pDE,
+ argData,
+ pFEArgInfo,
+ pMaybeInteriorPtrArray,
+ pObjectRefArray
+ DEBUG_ARG(pDataLocationArray)
+ );
+
+#ifdef _DEBUG
+ if (!RetValueType.IsNull())
+ {
+ _ASSERTE(RetValueType.IsValueType());
+ }
+#endif
+
+ //
+ // Do Step 1g - Pre-allocate any return value object.
+ //
+ OBJECTREF retObject = NULL;
+ GCPROTECT_BEGIN(retObject);
+
+ if ((pDE->m_evalType != DB_IPCE_FET_NEW_OBJECT) && !RetValueType.IsNull())
+ {
+ ValidateFuncEvalReturnType(pDE->m_evalType, RetValueType.GetMethodTable());
+ RetValueType.GetMethodTable()->EnsureInstanceActive();
+ retObject = AllocateObject(RetValueType.GetMethodTable());
+ }
+
+ //
+ // Do Step 1h - Copy into scratch buffer all enregistered arguments, and
+ // ByRef literals.
+ //
+ CopyArgsToBuffer(pDE,
+ argData,
+ pFEArgInfo,
+ pBufferForArgsArray
+ DEBUG_ARG(pDataLocationArray)
+ );
+
+ //
+ // We presume that the function has a return buffer. This assumption gets squeezed out
+ // when we pack the argument array.
+ //
+ allocArgCnt++;
+
+ LOG((LF_CORDB, LL_EVERYTHING,
+ "Func eval for %s::%s: allocArgCnt=%d\n",
+ pDE->m_md->m_pszDebugClassName,
+ pDE->m_md->m_pszDebugMethodName,
+ allocArgCnt));
+
+ MethodDescCallSite funcToEval(pDE->m_md, pDE->m_targetCodeAddr);
+
+ //
+ // Do Step 1i - Create and pack argument array for managed function call.
+ //
+ // Allocate space for argument stack
+ //
+ if ((!ClrSafeInt<SIZE_T>::multiply(allocArgCnt, sizeof(ARG_SLOT), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ ARG_SLOT * pArguments = (ARG_SLOT *)_alloca(cbAllocSize);
+ memset(pArguments, 0, cbAllocSize);
+
+ LPVOID pRetBuff = NULL;
+
+ PackArgumentArray(pDE,
+ argData,
+ pFEArgInfo,
+ pUnboxedMD,
+ RetValueType,
+ pObjectRefArray,
+ pMaybeInteriorPtrArray,
+ pBufferForArgsArray,
+ ppProtectedValueClasses,
+ newObj,
+#ifdef FEATURE_HFA
+ fHasRetBuffArg || fHasNonStdByValReturn,
+#else
+ fHasRetBuffArg,
+#endif
+ pArguments,
+ &pRetBuff
+ DEBUG_ARG(pDataLocationArray)
+ );
+
+ //
+ //
+ // Do Step 2 - Make the call!
+ //
+ //
+ FuncEvalWrapper(&funcToEval, pDE, pArguments, pCatcherStackAddr);
+ {
+
+ // We have now entered the zone where taking a GC is fatal until we get the
+ // return value all fixed up.
+ //
+ GCX_FORBID();
+
+
+ //
+ //
+ // Do Step 3 - Unpack results and update ByRef arguments.
+ //
+ //
+ //
+ LOG((LF_CORDB, LL_EVERYTHING, "FuncEval call has returned\n"));
+
+
+ // GC still can't happen until we get our return value out half way through the unpack function
+
+ UnpackFuncEvalResult(pDE,
+ newObj,
+ retObject,
+ RetValueType,
+ pRetBuff
+ );
+ }
+
+ UnpackFuncEvalArguments(pDE,
+ argData,
+ mSig,
+ staticMethod,
+ pObjectRefArray,
+ pMaybeInteriorPtrArray,
+ pByRefMaybeInteriorPtrArray,
+ pBufferForArgsArray
+ );
+
+ GCPROTECT_END(); // retObject
+ GCPROTECT_END(); // newObj
+}
+
+/*
+ * GCProtectArgsAndDoNormalFuncEval
+ *
+ * This routine is the primary entrypoint for normal func-evals. It implements the algorithm
+ * described at the top of this file, doing steps 1a and 1b itself, then calling DoNormalFuncEval
+ * to do the rest.
+ *
+ * Parameters:
+ * pDE - pointer to the DebuggerEval object being processed.
+ * pCatcherStackAddr - stack address to report as the Catch Handler Found location.
+ *
+ * Returns:
+ * None.
+ *
+ */
+static void GCProtectArgsAndDoNormalFuncEval(DebuggerEval *pDE,
+ BYTE *pCatcherStackAddr )
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+
+ INDEBUG(DataLocation pDataLocationArray[MAX_DATA_LOCATIONS_TRACKED]);
+
+ //
+ // An array to hold object ref args. This array is protected from GC's.
+ //
+ SIZE_T cbAllocSize;
+ if ((!ClrSafeInt<SIZE_T>::multiply(pDE->m_argCount, sizeof(OBJECTREF), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ OBJECTREF * pObjectRefArray = (OBJECTREF*)_alloca(cbAllocSize);
+ memset(pObjectRefArray, 0, cbAllocSize);
+ GCPROTECT_ARRAY_BEGIN(*pObjectRefArray, pDE->m_argCount);
+
+ //
+ // An array to hold values that may be pointers into a managed object. This array
+ // is protected from GCs.
+ //
+ if ((!ClrSafeInt<SIZE_T>::multiply(pDE->m_argCount, sizeof(void**), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ void ** pMaybeInteriorPtrArray = (void **)_alloca(cbAllocSize);
+ memset(pMaybeInteriorPtrArray, 0, cbAllocSize);
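+ // Note: (cbAllocSize / sizeof(OBJECTREF)) is simply pDE->m_argCount again; the byte size was
+ // computed with sizeof(void**), and OBJECTREF is pointer-sized, so the element count reported
+ // to the GC matches the array we just allocated.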
+ GCPROTECT_BEGININTERIOR_ARRAY(*pMaybeInteriorPtrArray, (UINT)(cbAllocSize/sizeof(OBJECTREF)));
+
+ //
+ // An array to hold values that may be pointers into a managed object. This array
+ // is protected from GCs. This array protects the address of the arguments while the
+ // pMaybeInteriorPtrArray protects the value of the arguments. We need to do this because
+ // of by ref arguments.
+ //
+ if ((!ClrSafeInt<SIZE_T>::multiply(pDE->m_argCount, sizeof(void**), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ void ** pByRefMaybeInteriorPtrArray = (void **)_alloca(cbAllocSize);
+ memset(pByRefMaybeInteriorPtrArray, 0, cbAllocSize);
+ GCPROTECT_BEGININTERIOR_ARRAY(*pByRefMaybeInteriorPtrArray, (UINT)(cbAllocSize/sizeof(OBJECTREF)));
+
+ //
+ // A buffer of temporary scratch space for things that do not need to be protected, or
+ // are protected for free (e.g. Handles).
+ //
+ if ((!ClrSafeInt<SIZE_T>::multiply(pDE->m_argCount, sizeof(INT64), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ INT64 *pBufferForArgsArray = (INT64*)_alloca(cbAllocSize);
+ memset(pBufferForArgsArray, 0, cbAllocSize);
+
+ FrameWithCookie<ProtectValueClassFrame> protectValueClassFrame;
+
+ //
+ // Initialize our tracking array
+ //
+ INDEBUG(memset(pDataLocationArray, 0, sizeof(DataLocation) * (MAX_DATA_LOCATIONS_TRACKED)));
+
+ {
+ GCX_FORBID();
+
+ //
+ // Do step 1a
+ //
+ GCProtectAllPassedArgs(pDE,
+ pObjectRefArray,
+ pMaybeInteriorPtrArray,
+ pByRefMaybeInteriorPtrArray,
+ pBufferForArgsArray
+ DEBUG_ARG(pDataLocationArray)
+ );
+
+ }
+
+ //
+ // Do step 1b: we can switch domains since everything is now protected.
+ // Note that before this point, it's unsafe to rely on pDE->m_module since it may be
+ // invalid due to an AD unload.
+ // All normal func evals should have an AppDomain specified.
+ //
+ _ASSERTE( pDE->m_appDomainId.m_dwId != 0 );
+ ENTER_DOMAIN_ID( pDE->m_appDomainId );
+
+ // Wrap everything in an EX_TRY so we catch any exceptions that could be thrown.
+ // Note that we don't let any thrown exceptions cross the AppDomain boundary because we don't
+ // want them to get marshalled.
+ EX_TRY
+ {
+ DoNormalFuncEval(
+ pDE,
+ pCatcherStackAddr,
+ pObjectRefArray,
+ pMaybeInteriorPtrArray,
+ pByRefMaybeInteriorPtrArray,
+ pBufferForArgsArray,
+ protectValueClassFrame.GetValueClassInfoList()
+ DEBUG_ARG(pDataLocationArray)
+ );
+ }
+ EX_CATCH
+ {
+ // We got an exception. Make the exception into our result.
+ OBJECTREF ppException = GET_THROWABLE();
+ GCX_FORBID();
+ RecordFuncEvalException( pDE, ppException);
+ }
+ // Note: we need to catch all exceptions here because they all get reported as the result of
+ // the funceval. If a ThreadAbort occurred other than for a funcEval abort, we'll re-throw it manually.
+ EX_END_CATCH(SwallowAllExceptions);
+
+ // Restore context
+ END_DOMAIN_TRANSITION;
+
+ protectValueClassFrame.Pop();
+
+ CleanUpTemporaryVariables(protectValueClassFrame.GetValueClassInfoList());
+
+ GCPROTECT_END(); // pByRefMaybeInteriorPtrArray
+ GCPROTECT_END(); // pMaybeInteriorPtrArray
+ GCPROTECT_END(); // pObjectRefArray
+ LOG((LF_CORDB, LL_EVERYTHING, "DoNormalFuncEval: returning...\n"));
+}
+
+
+void FuncEvalHijackRealWorker(DebuggerEval *pDE, Thread* pThread, FuncEvalFrame* pFEFrame)
+{
+ BYTE * pCatcherStackAddr = (BYTE*) pFEFrame;
+
+ // Handle normal func evals in DoNormalFuncEval
+ if ((pDE->m_evalType == DB_IPCE_FET_NEW_OBJECT) || (pDE->m_evalType == DB_IPCE_FET_NORMAL))
+ {
+ GCProtectArgsAndDoNormalFuncEval(pDE, pCatcherStackAddr);
+ LOG((LF_CORDB, LL_EVERYTHING, "DoNormalFuncEval has returned.\n"));
+ return;
+ }
+
+ // The method may be in a different AD than the thread.
+ // The RS already verified that all of the arguments are in the same appdomain as the function
+ // (because we can't verify it here).
+ // Note that this is exception safe, so we are guaranteed to be in the correct AppDomain when
+ // we leave this method.
+ // Before this, we can't safely use the DebuggerModule* since the domain may have been unloaded.
+ ENTER_DOMAIN_ID( pDE->m_appDomainId );
+
+ OBJECTREF newObj = NULL;
+ GCPROTECT_BEGIN(newObj);
+
+ // Wrap everything in an EX_TRY so we catch any exceptions that could be thrown.
+ // Note that we don't let any thrown exceptions cross the AppDomain boundary because we don't
+ // want them to get marshalled.
+ EX_TRY
+ {
+ DebuggerIPCE_TypeArgData *firstdata = pDE->GetTypeArgData();
+ DWORD nGenericArgs = pDE->m_genericArgsCount;
+
+ SIZE_T cbAllocSize;
+ if ((!ClrSafeInt<SIZE_T>::multiply(nGenericArgs, sizeof(TypeHandle *), cbAllocSize)) ||
+ (cbAllocSize != (size_t)(cbAllocSize)))
+ {
+ ThrowHR(COR_E_OVERFLOW);
+ }
+ TypeHandle *pGenericArgs = (nGenericArgs == 0) ? NULL : (TypeHandle *) _alloca(cbAllocSize);
+ //
+ // Snag the type arguments from the input and get the
+ // method desc that corresponds to the instantiated desc.
+ //
+ Debugger::TypeDataWalk walk(firstdata, pDE->m_genericArgsNodeCount);
+ walk.ReadTypeHandles(nGenericArgs, pGenericArgs);
+
+ // <TODO>better error message</TODO>
+ if (!walk.Finished())
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericArg"));
+
+ switch (pDE->m_evalType)
+ {
+ case DB_IPCE_FET_NEW_OBJECT_NC:
+ {
+
+ // Find the class.
+ TypeHandle thClass = g_pEEInterface->LoadClass(pDE->m_debuggerModule->GetRuntimeModule(),
+ pDE->m_classToken);
+
+ if (thClass.IsNull())
+ COMPlusThrow(kArgumentNullException, W("ArgumentNull_Type"));
+
+ // Apply any type arguments
+ TypeHandle th =
+ (nGenericArgs == 0)
+ ? thClass
+ : g_pEEInterface->LoadInstantiation(pDE->m_debuggerModule->GetRuntimeModule(),
+ pDE->m_classToken, nGenericArgs, pGenericArgs);
+
+ if (th.IsNull() || th.ContainsGenericVariables())
+ COMPlusThrow(kArgumentException, W("Argument_InvalidGenericArg"));
+
+ // Run the Class Init for this type, if necessary.
+ MethodTable * pOwningMT = th.GetMethodTable();
+ pOwningMT->EnsureInstanceActive();
+ pOwningMT->CheckRunClassInitThrowing();
+
+ // Create a new instance of the class
+
+ ValidateFuncEvalReturnType(DB_IPCE_FET_NEW_OBJECT_NC, th.GetMethodTable());
+
+ newObj = AllocateObject(th.GetMethodTable());
+
+ // No exception, so it worked.
+ pDE->m_successful = true;
+
+ // So is the result type.
+ pDE->m_resultType = th;
+
+ //
+ // Box up all returned objects
+ //
+ pDE->m_retValueBoxing = Debugger::AllBoxed;
+
+ // Make a strong handle for the result.
+ OBJECTHANDLE oh = pDE->m_thread->GetDomain()->CreateStrongHandle(newObj);
+ pDE->m_result = (INT64)(LONG_PTR)oh;
+ pDE->m_vmObjectHandle = VMPTR_OBJECTHANDLE::MakePtr(oh);
+
+ break;
+ }
+
+ case DB_IPCE_FET_NEW_STRING:
+ {
+ // Create the string. m_argData is not necessarily null terminated...
+ // The length parameter to NewString is a character count, not a buffer size, but
+ // we passed the buffer size across to copy our data properly, so we must divide it back out.
+ // NewString returns NULL if passed NULL, but we want an empty string in that case, so
+ // just create an EmptyString explicitly.
+ if ((pDE->m_argData == NULL) || (pDE->m_stringSize == 0))
+ {
+ newObj = StringObject::GetEmptyString();
+ }
+ else
+ {
+ newObj = StringObject::NewString(pDE->GetNewStringArgData(), (int)(pDE->m_stringSize/sizeof(WCHAR)));
+ }
+
+ // No exception, so it worked.
+ pDE->m_successful = true;
+
+ // Result type is, of course, a string.
+ pDE->m_resultType = newObj->GetTypeHandle();
+
+ // Place the result in a strong handle to protect it from a collection.
+ OBJECTHANDLE oh = pDE->m_thread->GetDomain()->CreateStrongHandle(newObj);
+ pDE->m_result = (INT64)(LONG_PTR)oh;
+ pDE->m_vmObjectHandle = VMPTR_OBJECTHANDLE::MakePtr(oh);
+
+ break;
+ }
+
+ case DB_IPCE_FET_NEW_ARRAY:
+ {
+ // <TODO>@todo: We only handle single-dimensional (SD) arrays for now.</TODO>
+ if (pDE->m_arrayRank > 1)
+ COMPlusThrow(kRankException, W("Rank_MultiDimNotSupported"));
+
+ // Grab the elementType from the arg/data area.
+ _ASSERTE(nGenericArgs == 1);
+ TypeHandle th = pGenericArgs[0];
+
+ CorElementType et = th.GetSignatureCorElementType();
+ // Gotta be a primitive, class, or System.Object.
+ if (((et < ELEMENT_TYPE_BOOLEAN) || (et > ELEMENT_TYPE_R8)) &&
+ !IsElementTypeSpecial(et))
+ {
+ COMPlusThrow(kArgumentOutOfRangeException, W("ArgumentOutOfRange_Enum"));
+ }
+
+ // Grab the dims from the arg/data area. These come after the type arguments.
+ SIZE_T *dims;
+ dims = (SIZE_T*) (firstdata + pDE->m_genericArgsNodeCount);
+
+ if (IsElementTypeSpecial(et))
+ {
+ newObj = AllocateObjectArray((DWORD)dims[0], th);
+ }
+ else
+ {
+ // Create a simple array. Note: we can only do this type of create here due to the checks above.
+ newObj = AllocatePrimitiveArray(et, (DWORD)dims[0]);
+ }
+
+ // No exception, so it worked.
+ pDE->m_successful = true;
+
+ // Result type is, of course, the type of the array.
+ pDE->m_resultType = newObj->GetTypeHandle();
+
+ // Place the result in a strong handle to protect it from a collection.
+ OBJECTHANDLE oh = pDE->m_thread->GetDomain()->CreateStrongHandle(newObj);
+ pDE->m_result = (INT64)(LONG_PTR)oh;
+ pDE->m_vmObjectHandle = VMPTR_OBJECTHANDLE::MakePtr(oh);
+
+ break;
+ }
+
+ default:
+ _ASSERTE(!"Invalid eval type!");
+ }
+ }
+ EX_CATCH
+ {
+ // We got an exception. Make the exception into our result.
+ OBJECTREF ppException = GET_THROWABLE();
+ GCX_FORBID();
+ RecordFuncEvalException( pDE, ppException);
+ }
+ // Note: we need to catch all exceptions here because they all get reported as the result of
+ // the funceval. If a ThreadAbort occurred other than for a funcEval abort, we'll re-throw it manually.
+ EX_END_CATCH(SwallowAllExceptions);
+
+ GCPROTECT_END();
+
+ //
+ // Restore context
+ //
+ END_DOMAIN_TRANSITION;
+
+}
+
+//
+// FuncEvalHijackWorker is the function that managed threads start executing in order to perform a function
+// evaluation. Control is transfered here on the proper thread by hijacking that that's IP to this method in
+// Debugger::FuncEvalSetup. This function can also be called directly by a Runtime thread that is stopped sending a
+// first or second chance exception to the Right Side.
+//
+// The DebuggerEval object may get deleted by the helper thread doing a CleanupFuncEval while this thread is blocked
+// sending the eval complete.
+void * STDCALL FuncEvalHijackWorker(DebuggerEval *pDE)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pDE));
+ }
+ CONTRACTL_END;
+
+
+
+ Thread *pThread = NULL;
+ CONTEXT *filterContext = NULL;
+
+ {
+ GCX_FORBID();
+
+ LOG((LF_CORDB, LL_INFO100000, "D:FEHW for pDE:%08x evalType:%d\n", pDE, pDE->m_evalType));
+
+ pThread = GetThread();
+
+#ifndef DACCESS_COMPILE
+#ifdef _DEBUG
+ //
+ // Flush all debug tracking information for this thread on object refs as it
+ // only approximates proper tracking and may have stale data, resulting in false
+ // positives. We don't want that as func-eval runs a lot, so flush them now.
+ //
+ g_pEEInterface->ObjectRefFlush(pThread);
+#endif
+#endif
+
+ if (!pDE->m_evalDuringException)
+ {
+ //
+ // From this point forward we use FORBID regions to guard against GCs.
+ // Refer to code:Debugger::FuncEvalSetup to see where the corresponding increment was done.
+ //
+ g_pDebugger->DecThreadsAtUnsafePlaces();
+ }
+
+ // Preemptive GC is disabled at the start of this method.
+ _ASSERTE(g_pEEInterface->IsPreemptiveGCDisabled());
+
+ DebuggerController::DispatchFuncEvalEnter(pThread);
+
+
+ // If we've got a filter context still installed, then remove it while we do the work...
+ filterContext = g_pEEInterface->GetThreadFilterContext(pDE->m_thread);
+
+ if (filterContext)
+ {
+ _ASSERTE(pDE->m_evalDuringException);
+ g_pEEInterface->SetThreadFilterContext(pDE->m_thread, NULL);
+ }
+
+ }
+
+ //
+ // Special handling for a re-abort eval. We don't set up an EX_TRY or try to look up a function to call. All we do
+ // is have this thread abort itself.
+ //
+ if (pDE->m_evalType == DB_IPCE_FET_RE_ABORT)
+ {
+ //
+ // Push our FuncEvalFrame. The return address is equal to the IP in the saved context in the DebuggerEval. The
+ // m_Datum becomes the ptr to the DebuggerEval. The frame address also serves as the address of the catch-handler-found.
+ //
+ FrameWithCookie<FuncEvalFrame> FEFrame(pDE, GetIP(&pDE->m_context), false);
+ FEFrame.Push();
+
+ pDE->m_thread->UserAbort(pDE->m_requester, EEPolicy::TA_Safe, INFINITE, Thread::UAC_Normal);
+ _ASSERTE(!"Should not return from UserAbort here!");
+ return NULL;
+ }
+
+ //
+ // We cannot scope the following in a GCX_FORBID(), but we would like to. But we need the frames on the
+ // stack here, so they must never go out of scope.
+ //
+
+ //
+ // Push our FuncEvalFrame. The return address is equal to the IP in the saved context in the DebuggerEval. The
+ // m_Datum becomes the ptr to the DebuggerEval. The frame address also serves as the address of the catch-handler-found.
+ //
+ FrameWithCookie<FuncEvalFrame> FEFrame(pDE, GetIP(&pDE->m_context), true);
+ FEFrame.Push();
+
+ // On ARM the single step flag is per-thread and not per context. We need to make sure that the SS flag is cleared
+ // for the funceval, and that the state is back to what it should be after the funceval completes.
+#ifdef _TARGET_ARM_
+ bool ssEnabled = pDE->m_thread->IsSingleStepEnabled();
+ if (ssEnabled)
+ pDE->m_thread->DisableSingleStep();
+#endif
+
+ FuncEvalHijackRealWorker(pDE, pThread, &FEFrame);
+
+#ifdef _TARGET_ARM_
+ if (ssEnabled)
+ pDE->m_thread->EnableSingleStep();
+#endif
+
+
+
+ LOG((LF_CORDB, LL_EVERYTHING, "FuncEval has finished its primary work.\n"));
+
+ //
+ // The func-eval is now completed, successfully or with failure, aborted or run-to-completion.
+ //
+ pDE->m_completed = true;
+
+ if (pDE->m_thread->IsAbortRequested())
+ {
+ //
+ // If an unmanaged thread also tried to abort this thread while we
+ // were doing the func-eval, we want to rethrow that abort. The check
+ // versus m_aborted is for the case where the FE was aborted, we caught that,
+ // then cleared the FEAbort request, but there is still an outstanding abort
+ // - then it must be a user abort.
+ //
+ if ((pDE->m_aborting == DebuggerEval::FE_ABORT_NONE) || pDE->m_aborted)
+ {
+ pDE->m_rethrowAbortException = true;
+ }
+
+ //
+ // Reset the abort request if a func-eval abort was submitted, but the func-eval completed
+ // before the abort could take place, we want to make sure we do not throw an abort exception
+ // in this case.
+ //
+ if (pDE->m_aborting != DebuggerEval::FE_ABORT_NONE)
+ {
+ pDE->m_thread->UserResetAbort(Thread::TAR_FuncEval);
+ }
+
+ }
+
+ // Codepitching can hijack our frame's return address. That means that we'll need to update PC in our saved context
+ // so that when it's restored, it's as if we've returned to the codepitching hijack. At this point, the old value of
+ // EIP is worthless anyway.
+ if (!pDE->m_evalDuringException)
+ {
+ SetIP(&pDE->m_context, (SIZE_T)FEFrame.GetReturnAddress());
+ }
+
+ //
+ // Disable all steppers and breakpoints created during the func-eval
+ //
+ DebuggerController::DispatchFuncEvalExit(pThread);
+
+ void *dest = NULL;
+
+ if (!pDE->m_evalDuringException)
+ {
+ // Signal to the helper thread that we're done with our func eval. Start by creating a DebuggerFuncEvalComplete
+ // object. Give it an address at which to create the patch, which is a chunk of memory inside of our
+ // DebuggerEval big enough to hold a breakpoint instruction.
+#ifdef _TARGET_ARM_
+ dest = (BYTE*)((DWORD)&(pDE->m_breakpointInstruction) | THUMB_CODE);
+#else
+ dest = &(pDE->m_breakpointInstruction);
+#endif
+
+ // Here is kind of a cheat... we make sure that the address that we patch and jump to is actually also the ptr
+ // to our DebuggerEval. This works because m_breakpointInstruction is the first field of the DebuggerEval
+ // struct.
+#ifdef _TARGET_ARM_
+ _ASSERTE((((DWORD)dest) & ~THUMB_CODE) == (DWORD)pDE);
+#else
+ _ASSERTE(dest == pDE);
+#endif
+
+ //
+ // The created object below sets up itself as a hijack and will destroy itself when the hijack and work
+ // is done.
+ //
+
+ DebuggerFuncEvalComplete *comp;
+ comp = new (interopsafe) DebuggerFuncEvalComplete(pThread, dest);
+ _ASSERTE(comp != NULL); // would have thrown
+
+ // Pop the FuncEvalFrame now that we're pretty much done. Make sure we
+ // don't pop the frame too early: a GC can be triggered while we grab the
+ // Debugger lock, and if we pop the FE frame without restoring the thread filter context,
+ // the frames are left uncrawlable.
+ //
+ FEFrame.Pop();
+ }
+ else
+ {
+ // We don't have to set up any special hijacks to return from here when we've been processing during an
+ // exception. We just go ahead and send the FuncEvalComplete event over now. Don't forget to enable/disable PGC
+ // around the call...
+ _ASSERTE(g_pEEInterface->IsPreemptiveGCDisabled());
+
+ if (filterContext != NULL)
+ {
+ g_pEEInterface->SetThreadFilterContext(pDE->m_thread, filterContext);
+ }
+
+ // Pop the FuncEvalFrame now that we're pretty much done.
+ FEFrame.Pop();
+
+
+ {
+ //
+ // This also grabs the debugger lock, so we can atomically check if a detach has
+ // happened.
+ //
+ SENDIPCEVENT_BEGIN(g_pDebugger, pDE->m_thread);
+
+ if ((pDE->m_thread->GetDomain() != NULL) && pDE->m_thread->GetDomain()->IsDebuggerAttached())
+ {
+
+ if (CORDebuggerAttached())
+ {
+ g_pDebugger->FuncEvalComplete(pDE->m_thread, pDE);
+
+ g_pDebugger->SyncAllThreads(SENDIPCEVENT_PtrDbgLockHolder);
+ }
+
+ }
+
+ SENDIPCEVENT_END;
+ }
+ }
+
+
+ // pDE may now point to deleted memory if the helper thread did a CleanupFuncEval while we
+ // were blocked waiting for a continue after the func-eval complete.
+
+ // We return the address that we want to resume executing at.
+ return dest;
+
+}
+
+
+#if defined(WIN64EXCEPTIONS)
+
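+//
+// FuncEvalHijackPersonalityRoutine is the exception personality routine used for the FuncEvalHijack stub.
+// It recovers the DebuggerEval* spilled by the stub (the exact slot relative to the establisher frame is
+// platform-specific, see below) and redirects exception dispatch to the thread's original pre-hijack
+// context saved in the DebuggerEval.
+//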
+EXTERN_C EXCEPTION_DISPOSITION
+FuncEvalHijackPersonalityRoutine(IN PEXCEPTION_RECORD pExceptionRecord
+ WIN64_ARG(IN ULONG64 MemoryStackFp)
+ NOT_WIN64_ARG(IN ULONG32 MemoryStackFp),
+ IN OUT PCONTEXT pContextRecord,
+ IN OUT PDISPATCHER_CONTEXT pDispatcherContext
+ )
+{
+ DebuggerEval* pDE = NULL;
+#if defined(_TARGET_AMD64_)
+ pDE = *(DebuggerEval**)(pDispatcherContext->EstablisherFrame);
+#elif defined(_TARGET_ARM_)
+ // on ARM the establisher frame is the SP of the caller of FuncEvalHijack, on other platforms it's FuncEvalHijack's SP.
+ // in FuncEvalHijack we allocate 8 bytes of stack space and then store R0 at the current SP, so if we subtract 8 from
+ // the establisher frame we can get the stack location where R0 was stored.
+ pDE = *(DebuggerEval**)(pDispatcherContext->EstablisherFrame - 8);
+#else
+ _ASSERTE(!"NYI - FuncEvalHijackPersonalityRoutine()");
+#endif
+
+ FixupDispatcherContext(pDispatcherContext, &(pDE->m_context), pContextRecord);
+
+ // Returning ExceptionCollidedUnwind will cause the OS to take our new context record and
+ // dispatcher context and restart the exception dispatching on this call frame, which is
+ // exactly the behavior we want.
+ return ExceptionCollidedUnwind;
+}
+
+
+#endif // WIN64EXCEPTIONS
+
+#endif // ifndef DACCESS_COMPILE
diff --git a/src/debug/ee/functioninfo.cpp b/src/debug/ee/functioninfo.cpp
new file mode 100644
index 0000000000..af52ab5c9d
--- /dev/null
+++ b/src/debug/ee/functioninfo.cpp
@@ -0,0 +1,2474 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+
+//
+// File: functioninfo.cpp
+//
+// Stuff for tracking DebuggerJitInfos and DebuggerMethodInfos.
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+#include "../inc/common.h"
+#include "perflog.h"
+#include "eeconfig.h" // This is here even for retail & free builds...
+#include "vars.hpp"
+#include <limits.h>
+#include "ilformatter.h"
+#include "debuginfostore.h"
+#include "../../vm/methoditer.h"
+
+#ifndef DACCESS_COMPILE
+
+bool DbgIsSpecialILOffset(DWORD offset)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (offset == (ULONG) ICorDebugInfo::PROLOG ||
+ offset == (ULONG) ICorDebugInfo::EPILOG ||
+ offset == (ULONG) ICorDebugInfo::NO_MAPPING);
+}
+
+// Helper to use w/ the debug stores.
+BYTE* InteropSafeNew(void * , size_t cBytes)
+{
+ BYTE * p = new (interopsafe, nothrow) BYTE[cBytes];
+ return p;
+}
+
+
+//
+// This is only for internal debugging.
+//
+#ifdef LOGGING
+static void _dumpVarNativeInfo(ICorDebugInfo::NativeVarInfo* vni)
+{
+ WRAPPER_NO_CONTRACT;
+
+ LOG((LF_CORDB, LL_INFO1000000, "Var %02d: 0x%04x-0x%04x vlt=",
+ vni->varNumber,
+ vni->startOffset, vni->endOffset,
+ vni->loc.vlType));
+
+ switch (vni->loc.vlType)
+ {
+ case ICorDebugInfo::VLT_REG:
+ LOG((LF_CORDB, LL_INFO1000000, "REG reg=%d\n", vni->loc.vlReg.vlrReg));
+ break;
+
+ case ICorDebugInfo::VLT_REG_BYREF:
+ LOG((LF_CORDB, LL_INFO1000000, "REG_BYREF reg=%d\n", vni->loc.vlReg.vlrReg));
+ break;
+
+ case ICorDebugInfo::VLT_STK:
+ LOG((LF_CORDB, LL_INFO1000000, "STK reg=%d off=0x%04x (%d)\n",
+ vni->loc.vlStk.vlsBaseReg,
+ vni->loc.vlStk.vlsOffset,
+ vni->loc.vlStk.vlsOffset));
+ break;
+
+ case ICorDebugInfo::VLT_STK_BYREF:
+ LOG((LF_CORDB, LL_INFO1000000, "STK_BYREF reg=%d off=0x%04x (%d)\n",
+ vni->loc.vlStk.vlsBaseReg,
+ vni->loc.vlStk.vlsOffset,
+ vni->loc.vlStk.vlsOffset));
+ break;
+
+ case ICorDebugInfo::VLT_REG_REG:
+ LOG((LF_CORDB, LL_INFO1000000, "REG_REG reg1=%d reg2=%d\n",
+ vni->loc.vlRegReg.vlrrReg1,
+ vni->loc.vlRegReg.vlrrReg2));
+ break;
+
+ case ICorDebugInfo::VLT_REG_STK:
+ LOG((LF_CORDB, LL_INFO1000000, "REG_STK reg=%d basereg=%d off=0x%04x (%d)\n",
+ vni->loc.vlRegStk.vlrsReg,
+ vni->loc.vlRegStk.vlrsStk.vlrssBaseReg,
+ vni->loc.vlRegStk.vlrsStk.vlrssOffset,
+ vni->loc.vlRegStk.vlrsStk.vlrssOffset));
+ break;
+
+ case ICorDebugInfo::VLT_STK_REG:
+ LOG((LF_CORDB, LL_INFO1000000, "STK_REG basereg=%d off=0x%04x (%d) reg=%d\n",
+ vni->loc.vlStkReg.vlsrStk.vlsrsBaseReg,
+ vni->loc.vlStkReg.vlsrStk.vlsrsOffset,
+ vni->loc.vlStkReg.vlsrStk.vlsrsOffset,
+ vni->loc.vlStkReg.vlsrReg));
+ break;
+
+ case ICorDebugInfo::VLT_STK2:
+ LOG((LF_CORDB, LL_INFO1000000, "STK_STK reg=%d off=0x%04x (%d)\n",
+ vni->loc.vlStk2.vls2BaseReg,
+ vni->loc.vlStk2.vls2Offset,
+ vni->loc.vlStk2.vls2Offset));
+ break;
+
+ case ICorDebugInfo::VLT_FPSTK:
+ LOG((LF_CORDB, LL_INFO1000000, "FPSTK reg=%d\n",
+ vni->loc.vlFPstk.vlfReg));
+ break;
+
+ case ICorDebugInfo::VLT_FIXED_VA:
+ LOG((LF_CORDB, LL_INFO1000000, "FIXED_VA offset=%d (%d)\n",
+ vni->loc.vlFixedVarArg.vlfvOffset,
+ vni->loc.vlFixedVarArg.vlfvOffset));
+ break;
+
+
+ default:
+ LOG((LF_CORDB, LL_INFO1000000, "???\n"));
+ break;
+ }
+}
+#endif
+
+#if defined(WIN64EXCEPTIONS)
+void DebuggerJitInfo::InitFuncletAddress()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_funcletCount = (int)g_pEEInterface->GetFuncletStartOffsets((const BYTE*)m_addrOfCode, NULL, 0);
+
+ if (m_funcletCount == 0)
+ {
+ _ASSERTE(m_rgFunclet == NULL);
+ return;
+ }
+
+ m_rgFunclet = (DWORD*)(new (interopsafe, nothrow) DWORD[m_funcletCount]);
+
+ // All bets are off for stepping this method.
+ if (m_rgFunclet == NULL)
+ {
+ m_funcletCount = 0;
+ return;
+ }
+
+ // This will get the offsets relative to the parent method start as if
+ // the funclet was in contiguous memory (i.e. not hot/cold split).
+ g_pEEInterface->GetFuncletStartOffsets((const BYTE*)m_addrOfCode, m_rgFunclet, m_funcletCount);
+}
+
+//
+// DebuggerJitInfo::GetFuncletOffsetByIndex()
+//
+// Given a funclet index, return its starting offset.
+//
+// parameters: index - index of the funclet
+//
+// return value: starting offset of the specified funclet, or -1 if the index is invalid
+//
+DWORD DebuggerJitInfo::GetFuncletOffsetByIndex(int index)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (index < 0 || index >= m_funcletCount)
+ {
+ return (-1);
+ }
+
+ return m_rgFunclet[index];
+}
+
+//
+// DebuggerJitInfo::GetFuncletIndex()
+//
+// Given an offset or an absolute address, return the index of the funclet containing it.
+//
+// parameters: offsetOrAddr - an offset or an absolute address in the method
+// mode - whether the first argument is an offset or an absolute address
+//
+// return value: the index of the funclet containing the specified offset or address,
+// or -1 if it's invalid
+//
+int DebuggerJitInfo::GetFuncletIndex(CORDB_ADDRESS offsetOrAddr, GetFuncletIndexMode mode)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DWORD offset = 0;
+ if (mode == GFIM_BYOFFSET)
+ {
+ offset = (DWORD)offsetOrAddr;
+ }
+
+ // If the address doesn't fall in any of the funclets (or if the
+ // method doesn't have any funclet at all), then return PARENT_METHOD_INDEX.
+ // <TODO>
+ // What if there's an overflow?
+ // </TODO>
+ if (!m_codeRegionInfo.IsMethodAddress((const BYTE *)(mode == GFIM_BYOFFSET ? (size_t)m_codeRegionInfo.OffsetToAddress(offset) : offsetOrAddr)))
+ {
+ return PARENT_METHOD_INDEX;
+ }
+
+ if ( ( m_funcletCount == 0 ) ||
+ ( (mode == GFIM_BYOFFSET) && (offset < m_rgFunclet[0]) ) ||
+ ( (mode == GFIM_BYADDRESS) && (offsetOrAddr < (size_t)m_codeRegionInfo.OffsetToAddress(m_rgFunclet[0])) ) )
+ {
+ return PARENT_METHOD_INDEX;
+ }
+
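+ // Walk the funclets in start-offset order: the target lies in funclet i if its offset/address is
+ // below the start of funclet i+1; if no later funclet starts first, it lies in the last funclet.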
+ for (int i = 0; i < m_funcletCount; i++)
+ {
+ if (i == (m_funcletCount - 1))
+ {
+ return i;
+ }
+ else if ( ( (mode == GFIM_BYOFFSET) && (offset < m_rgFunclet[i+1]) ) ||
+ ( (mode == GFIM_BYADDRESS) && (offsetOrAddr < (size_t)m_codeRegionInfo.OffsetToAddress(m_rgFunclet[i+1])) ) )
+ {
+ return i;
+ }
+ }
+
+ UNREACHABLE();
+}
+
+#endif // WIN64EXCEPTIONS
+
+// It is entirely possible that we have multiple sequence points for the
+// same IL offset (because of funclets, optimization, etc.). Just to be
+// uniform in all cases, let's return the sequence point with the smallest
+// native offset if fWantFirst is TRUE.
+#if defined(WIN64EXCEPTIONS)
+#define ADJUST_MAP_ENTRY(_map, _wantFirst) \
+ if ((_wantFirst)) \
+ for ( ; (_map) > m_sequenceMap && (((_map)-1)->ilOffset == (_map)->ilOffset); (_map)--); \
+ else \
+ for ( ; (_map) < m_sequenceMap + (m_sequenceMapCount-1) && (((_map)+1)->ilOffset == (_map)->ilOffset); (_map)++);
+#else
+#define ADJUST_MAP_ENTRY(_map, _wantFirst)
+#endif // WIN64EXCEPTIONS
+
+DebuggerJitInfo::DebuggerJitInfo(DebuggerMethodInfo *minfo, MethodDesc *fd) :
+ m_fd(fd),
+ m_pLoaderModule(fd->GetLoaderModule()),
+ m_jitComplete(false),
+#ifdef EnC_SUPPORTED
+ m_encBreakpointsApplied(false),
+#endif //EnC_SUPPORTED
+ m_methodInfo(minfo),
+ m_addrOfCode(NULL),
+ m_sizeOfCode(0), m_prevJitInfo(NULL), m_nextJitInfo(NULL),
+ m_lastIL(0),
+ m_sequenceMap(NULL),
+ m_sequenceMapCount(0),
+ m_callsiteMap(NULL),
+ m_callsiteMapCount(0),
+ m_sequenceMapSorted(false),
+ m_varNativeInfo(NULL), m_varNativeInfoCount(0),
+ m_fAttemptInit(false)
+#if defined(WIN64EXCEPTIONS)
+ ,m_rgFunclet(NULL)
+ , m_funcletCount(0)
+#endif // defined(WIN64EXCEPTIONS)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // A DJI is just the debugger's cache of interesting information +
+ // various debugger-specific state for a method (like Enc).
+ // So we should only be creating DJIs when a debugger is actually attached.
+ // The profiler also piggy-backs on the DJIs.
+ // @Todo - the managed stackwalker in the BCL also builds on DJIs.
+ //_ASSERTE(CORDebuggerAttached() || CORProfilerPresent());
+
+ _ASSERTE(minfo);
+ m_encVersion = minfo->GetCurrentEnCVersion();
+ _ASSERTE(m_encVersion >= CorDB_DEFAULT_ENC_FUNCTION_VERSION);
+ LOG((LF_CORDB,LL_EVERYTHING, "DJI::DJI : created at 0x%x\n", this));
+
+ // Debugger doesn't track LightWeight codegen methods.
+ // We should never even be creating a DJI for one.
+ _ASSERTE(!m_fd->IsDynamicMethod());
+}
+
+DebuggerILToNativeMap *DebuggerJitInfo::MapILOffsetToMapEntry(SIZE_T offset, BOOL *exact, BOOL fWantFirst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK; // GetSequenceMapCount calls LazyInitBounds() which can eventually
+ // call ExecutionManager::IncrementReader
+ }
+ CONTRACTL_END;
+
+ // Ideally we should be able to assert this, since the binary search in this function
+ // assumes that the sequence points are sorted by IL offset (NO_MAPPING, PROLOG, and EPILOG
+ // are actually -1, -2, and -3, respectively). However, the sequence points in pdb's use
+ // -1 to mean "end of the method", which is different from our semantics of using 0.
+ // _ASSERTE(offset != NO_MAPPING && offset != PROLOG && offset != EPILOG);
+
+ //
+ // Binary search for matching map element.
+ //
+
+ DebuggerILToNativeMap *mMin = GetSequenceMap();
+ DebuggerILToNativeMap *mMax = mMin + GetSequenceMapCount();
+
+ _ASSERTE(m_sequenceMapSorted);
+ _ASSERTE( mMin < mMax ); //otherwise we have no code
+
+ if (exact)
+ {
+ *exact = FALSE;
+ }
+
+ if (mMin)
+ {
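+ // Narrow [mMin, mMax) until mMin points at the entry whose IL offset most closely precedes
+ // (or equals) the requested offset. PROLOG entries are stored as (ULONG)-2, which compares
+ // greater than any real IL offset, so they are explicitly prevented from pulling mMax down.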
+ while (mMin + 1 < mMax)
+ {
+ _ASSERTE(mMin>=m_sequenceMap);
+ DebuggerILToNativeMap *mMid = mMin + ((mMax - mMin)>>1);
+ _ASSERTE(mMid>=m_sequenceMap);
+
+ if (offset == mMid->ilOffset)
+ {
+ if (exact)
+ {
+ *exact = TRUE;
+ }
+ ADJUST_MAP_ENTRY(mMid, fWantFirst);
+ return mMid;
+ }
+ else if (offset < mMid->ilOffset && mMid->ilOffset != (ULONG) ICorDebugInfo::PROLOG)
+ {
+ mMax = mMid;
+ }
+ else
+ {
+ mMin = mMid;
+ }
+ }
+
+ if (exact && offset == mMin->ilOffset)
+ {
+ *exact = TRUE;
+ }
+ ADJUST_MAP_ENTRY(mMin, fWantFirst);
+ }
+ return mMin;
+}
+
+void DebuggerJitInfo::InitILToNativeOffsetIterator(ILToNativeOffsetIterator &iterator, SIZE_T ilOffset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ iterator.Init(this, ilOffset);
+}
+
+
+DebuggerJitInfo::NativeOffset DebuggerJitInfo::MapILOffsetToNative(DebuggerJitInfo::ILOffset ilOffset)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ NativeOffset resultOffset;
+
+ DebuggerILToNativeMap *map = MapILOffsetToMapEntry(ilOffset.m_ilOffset, &(resultOffset.m_fExact));
+
+#if defined(WIN64EXCEPTIONS)
+ // See if we want the map entry for the parent.
+ if (ilOffset.m_funcletIndex <= PARENT_METHOD_INDEX)
+ {
+#endif // WIN64EXCEPTIONS
+ PREFIX_ASSUME( map != NULL );
+ LOG((LF_CORDB, LL_INFO10000, "DJI::MILOTN: ilOff 0x%x to nat 0x%x exact:0x%x (Entry IL Off:0x%x)\n",
+ ilOffset.m_ilOffset, map->nativeStartOffset, resultOffset.m_fExact, map->ilOffset));
+
+ resultOffset.m_nativeOffset = map->nativeStartOffset;
+
+#if defined(WIN64EXCEPTIONS)
+ }
+ else
+ {
+ // funcletIndex is guaranteed to be >= 0 at this point.
+ if (ilOffset.m_funcletIndex > (m_funcletCount - 1))
+ {
+ resultOffset.m_fExact = FALSE;
+ resultOffset.m_nativeOffset = ((SIZE_T)-1);
+ }
+ else
+ {
+ // Initialize the funclet range.
+ // ASSUMES that funclets are contiguous which they currently are...
+ DWORD funcletStartOffset = GetFuncletOffsetByIndex(ilOffset.m_funcletIndex);
+ DWORD funcletEndOffset;
+ if (ilOffset.m_funcletIndex < (m_funcletCount - 1))
+ {
+ funcletEndOffset = GetFuncletOffsetByIndex(ilOffset.m_funcletIndex + 1);
+ }
+ else
+ {
+ funcletEndOffset = (DWORD)m_sizeOfCode;
+ }
+
+ SIZE_T ilTargetOffset = map->ilOffset;
+
+ DebuggerILToNativeMap *mapEnd = GetSequenceMap() + GetSequenceMapCount();
+
+ for (; map < mapEnd && map->ilOffset == ilTargetOffset; map++)
+ {
+ if ((map->nativeStartOffset >= funcletStartOffset) &&
+ (map->nativeStartOffset < funcletEndOffset))
+ {
+ // This is the normal case where the start offset falls in
+ // the range of the funclet.
+ resultOffset.m_nativeOffset = map->nativeStartOffset;
+ break;
+ }
+ }
+
+ if (map == mapEnd || map->ilOffset != ilTargetOffset)
+ {
+ resultOffset.m_fExact = FALSE;
+ resultOffset.m_nativeOffset = ((SIZE_T)-1);
+ }
+ }
+ }
+#endif // WIN64EXCEPTIONS
+
+ return resultOffset;
+}
+
+
+DebuggerJitInfo::ILToNativeOffsetIterator::ILToNativeOffsetIterator()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_dji = NULL;
+ m_currentILOffset.m_ilOffset = INVALID_IL_OFFSET;
+#ifdef WIN64EXCEPTIONS
+ m_currentILOffset.m_funcletIndex = PARENT_METHOD_INDEX;
+#endif
+}
+
+void DebuggerJitInfo::ILToNativeOffsetIterator::Init(DebuggerJitInfo* dji, SIZE_T ilOffset)
+{
+ WRAPPER_NO_CONTRACT;
+
+ m_dji = dji;
+ m_currentILOffset.m_ilOffset = ilOffset;
+#ifdef WIN64EXCEPTIONS
+ m_currentILOffset.m_funcletIndex = PARENT_METHOD_INDEX;
+#endif
+
+ m_currentNativeOffset = m_dji->MapILOffsetToNative(m_currentILOffset);
+}
+
+bool DebuggerJitInfo::ILToNativeOffsetIterator::IsAtEnd()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_currentILOffset.m_ilOffset == INVALID_IL_OFFSET);
+}
+
+SIZE_T DebuggerJitInfo::ILToNativeOffsetIterator::Current(BOOL* pfExact)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (pfExact != NULL)
+ {
+ *pfExact = m_currentNativeOffset.m_fExact;
+ }
+ return m_currentNativeOffset.m_nativeOffset;
+}
+
+SIZE_T DebuggerJitInfo::ILToNativeOffsetIterator::CurrentAssertOnlyOne(BOOL* pfExact)
+{
+ WRAPPER_NO_CONTRACT;
+
+ SIZE_T nativeOffset = Current(pfExact);
+
+ Next();
+ _ASSERTE(IsAtEnd());
+
+ return nativeOffset;
+}
+
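+// Advance the iterator. Under WIN64EXCEPTIONS we probe the remaining funclets for one that maps the
+// same IL offset to a different native offset; if none is found (or on platforms without funclets),
+// the iterator is finished and the IL offset is set to INVALID_IL_OFFSET.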
+void DebuggerJitInfo::ILToNativeOffsetIterator::Next()
+{
+#if defined(WIN64EXCEPTIONS)
+ NativeOffset tmpNativeOffset;
+
+ for (m_currentILOffset.m_funcletIndex += 1;
+ m_currentILOffset.m_funcletIndex < m_dji->GetFuncletCount();
+ m_currentILOffset.m_funcletIndex++)
+ {
+ tmpNativeOffset = m_dji->MapILOffsetToNative(m_currentILOffset);
+ if (tmpNativeOffset.m_nativeOffset != ((SIZE_T)-1) &&
+ tmpNativeOffset.m_nativeOffset != m_currentNativeOffset.m_nativeOffset)
+ {
+ m_currentNativeOffset = tmpNativeOffset;
+ break;
+ }
+ }
+
+ if (m_currentILOffset.m_funcletIndex == m_dji->GetFuncletCount())
+ {
+ m_currentILOffset.m_ilOffset = INVALID_IL_OFFSET;
+ }
+#else // !WIN64EXCEPTIONS
+ m_currentILOffset.m_ilOffset = INVALID_IL_OFFSET;
+#endif // !WIN64EXCEPTIONS
+}
+
+
+
+// SIZE_T DebuggerJitInfo::MapSpecialToNative(): Maps something like
+// a prolog to a native offset.
+// CordDebugMappingResult mapping: Mapping type to be looking for.
+// SIZE_T which: Which one. <TODO>For now, set to zero. <@todo Later, we'll
+// change this to some value that we get back from MapNativeToILOffset
+// to indicate which of the (possibly multiple) epilogs
+// is meant.</TODO>
+
+SIZE_T DebuggerJitInfo::MapSpecialToNative(CorDebugMappingResult mapping,
+ SIZE_T which,
+ BOOL *pfAccurate)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(NULL != pfAccurate);
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO10000, "DJI::MSTN map:0x%x which:0x%x\n", mapping, which));
+
+ bool fFound;
+ SIZE_T cFound = 0;
+
+ DebuggerILToNativeMap *m = GetSequenceMap();
+ DebuggerILToNativeMap *mEnd = m + GetSequenceMapCount();
+ if (m)
+ {
+ while(m < mEnd)
+ {
+ _ASSERTE(m>=GetSequenceMap());
+
+ fFound = false;
+
+ if (DbgIsSpecialILOffset(m->ilOffset))
+ cFound++;
+
+ if (cFound == which)
+ {
+ _ASSERTE( (mapping == MAPPING_PROLOG &&
+ m->ilOffset == (ULONG) ICorDebugInfo::PROLOG) ||
+ (mapping == MAPPING_EPILOG &&
+ m->ilOffset == (ULONG) ICorDebugInfo::EPILOG) ||
+ ((mapping == MAPPING_NO_INFO || mapping == MAPPING_UNMAPPED_ADDRESS) &&
+ m->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING)
+ );
+
+ (*pfAccurate) = TRUE;
+ LOG((LF_CORDB, LL_INFO10000, "DJI::MSTN found mapping to nat:0x%x\n",
+ m->nativeStartOffset));
+ return m->nativeStartOffset;
+ }
+ m++;
+ }
+ }
+
+ LOG((LF_CORDB, LL_INFO10000, "DJI::MSTN No mapping found :(\n"));
+ (*pfAccurate) = FALSE;
+
+ return 0;
+}
+
+#if defined(WIN64EXCEPTIONS)
+//
+// DebuggerJitInfo::MapILOffsetToNativeForSetIP()
+//
+// This function maps an IL offset to a native offset, taking into account cloned finallys and nested EH clauses.
+//
+// parameters: offsetILTo - the destination IP, in IL offset
+// funcletIndexFrom - the funclet index of the source IP
+// pEHRT - tree structure for keeping track of EH clause information
+// pExact - pointer for returning whether the mapping is exact or not
+//
+// return value: destination IP, in native offset
+//
+SIZE_T DebuggerJitInfo::MapILOffsetToNativeForSetIP(SIZE_T offsetILTo, int funcletIndexFrom,
+ EHRangeTree* pEHRT, BOOL* pExact)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ DebuggerILToNativeMap* pMap = MapILOffsetToMapEntry(offsetILTo, pExact, TRUE);
+ DebuggerILToNativeMap* pMapEnd = GetSequenceMap() + GetSequenceMapCount();
+
+ _ASSERTE(pMap == m_sequenceMap ||
+ (pMap - 1)->ilOffset == (ULONG)ICorDebugInfo::NO_MAPPING ||
+ (pMap - 1)->ilOffset == (ULONG)ICorDebugInfo::PROLOG ||
+ (pMap - 1)->ilOffset == (ULONG)ICorDebugInfo::EPILOG ||
+ pMap->ilOffset > (pMap - 1)->ilOffset);
+
+ SIZE_T offsetNatTo = pMap->nativeStartOffset;
+
+ if (m_funcletCount == 0 ||
+ pEHRT == NULL ||
+ FAILED(pEHRT->m_hrInit))
+ {
+ return offsetNatTo;
+ }
+
+ // Multiple sequence points may have the same IL offset, which means that the code is duplicated in
+ // multiple funclets and/or in the parent method. If the destination offset maps to multiple sequence
+ // points (and hence to multiple funclets), we try to find the a sequence point which is in the same
+ // funclet as the source sequence point. If we can't find one, then the operation is going to fail
+ // anyway, so we just return the first sequence point we find.
+ for (DebuggerILToNativeMap* pMapCur = pMap + 1;
+ (pMapCur < pMapEnd) && (pMapCur->ilOffset == pMap->ilOffset);
+ pMapCur++)
+ {
+ int funcletIndexTo = GetFuncletIndex(pMapCur->nativeStartOffset, DebuggerJitInfo::GFIM_BYOFFSET);
+ if (funcletIndexFrom == funcletIndexTo)
+ {
+ return pMapCur->nativeStartOffset;
+ }
+ }
+
+ return offsetNatTo;
+}
+#endif // WIN64EXCEPTIONS
+
+// void DebuggerJitInfo::MapILRangeToMapEntryRange(): MIRTMER
+// calls MapILOffsetToNative for the startOffset (putting the
+// result into start), and the endOffset (putting the result into end).
+// SIZE_T startOffset: IL offset from beginning of function.
+// SIZE_T endOffset: IL offset from beginning of function,
+// or zero to indicate that the end of the function should be used.
+// DebuggerILToNativeMap **start: Contains start & end
+// native offsets that correspond to startOffset. Set to NULL if
+// there is no mapping info.
+// DebuggerILToNativeMap **end: Contains start & end native
+// offsets that correspond to endOffset. Set to NULL if there
+// is no mapping info.
+void DebuggerJitInfo::MapILRangeToMapEntryRange(SIZE_T startOffset,
+ SIZE_T endOffset,
+ DebuggerILToNativeMap **start,
+ DebuggerILToNativeMap **end)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000000,
+ "DJI::MIRTMER: IL 0x%04x-0x%04x\n",
+ startOffset, endOffset));
+
+ if (GetSequenceMapCount() == 0)
+ {
+ *start = NULL;
+ *end = NULL;
+ return;
+ }
+
+ *start = MapILOffsetToMapEntry(startOffset);
+
+ //
+ // end points to the last range that endOffset maps to, not past
+ // the last range.
+ // We want to return the last IL, and exclude the epilog
+ if (endOffset == 0)
+ {
+ *end = GetSequenceMap() + GetSequenceMapCount() - 1;
+ _ASSERTE(*end>=m_sequenceMap);
+
+ while ( ((*end)->ilOffset == (ULONG) ICorDebugInfo::EPILOG||
+ (*end)->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING)
+ && (*end) > m_sequenceMap)
+ {
+ (*end)--;
+ _ASSERTE(*end>=m_sequenceMap);
+
+ }
+ }
+ else
+ *end = MapILOffsetToMapEntry(endOffset - 1, NULL
+ WIN64_ARG(FALSE));
+
+ _ASSERTE(*end>=m_sequenceMap);
+
+
+ LOG((LF_CORDB, LL_INFO1000000,
+ "DJI::MIRTMER: IL 0x%04x-0x%04x --> 0x%04x 0x%08x-0x%08x\n"
+ " --> 0x%04x 0x%08x-0x%08x\n",
+ startOffset, endOffset,
+ (*start)->ilOffset,
+ (*start)->nativeStartOffset, (*start)->nativeEndOffset,
+ (*end)->ilOffset,
+ (*end)->nativeStartOffset, (*end)->nativeEndOffset));
+}
+
+// @dbgtodo Microsoft inspection: This function has been replicated in DacDbiStructures so
+// this version can be deleted when inspection is complete.
+
+// DWORD DebuggerJitInfo::MapNativeOffsetToIL(): Given a native
+// offset for the DebuggerJitInfo, compute
+// the IL offset from the beginning of the same method.
+// Returns: Offset of the IL instruction that contains
+// the native offset,
+// SIZE_T nativeOffset: [IN] Native Offset
+// CorDebugMappingResult *map: [OUT] explains the
+// quality of the matching & special cases
+// SIZE_T which: It's possible to have multiple EPILOGs, or
+// multiple unmapped regions within a method. This opaque value
+// specifies which special region we're talking about. This
+// param has no meaning if map & (MAPPING_EXACT|MAPPING_APPROXIMATE)
+// Basically, this gets handed back to MapSpecialToNative, later.
+DWORD DebuggerJitInfo::MapNativeOffsetToIL(SIZE_T nativeOffsetToMap,
+ CorDebugMappingResult *map,
+ DWORD *which,
+ BOOL skipPrologs)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(map != NULL);
+ PRECONDITION(which != NULL);
+ }
+ CONTRACTL_END;
+
+ DWORD nativeOffset = (DWORD)nativeOffsetToMap;
+
+ (*which) = 0;
+ DebuggerILToNativeMap *m = GetSequenceMap();
+ DebuggerILToNativeMap *mEnd = m + GetSequenceMapCount();
+
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI: nativeOffset = 0x%x\n", nativeOffset));
+
+ if (m)
+ {
+ while (m < mEnd)
+ {
+ _ASSERTE(m>=m_sequenceMap);
+
+#ifdef LOGGING
+ if (m->ilOffset == (ULONG) ICorDebugInfo::PROLOG )
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI: m->natStart:0x%x m->natEnd:0x%x il:PROLOG\n", m->nativeStartOffset, m->nativeEndOffset));
+ else if (m->ilOffset == (ULONG) ICorDebugInfo::EPILOG )
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI: m->natStart:0x%x m->natEnd:0x%x il:EPILOG\n", m->nativeStartOffset, m->nativeEndOffset));
+ else if (m->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING)
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI: m->natStart:0x%x m->natEnd:0x%x il:NO MAP\n", m->nativeStartOffset, m->nativeEndOffset));
+ else
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI: m->natStart:0x%x m->natEnd:0x%x il:0x%x src:0x%x\n", m->nativeStartOffset, m->nativeEndOffset, m->ilOffset, m->source));
+#endif // LOGGING
+
+ if (m->ilOffset == (ULONG) ICorDebugInfo::PROLOG ||
+ m->ilOffset == (ULONG) ICorDebugInfo::EPILOG ||
+ m->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING)
+ {
+ (*which)++;
+ }
+
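+ // An entry with nativeEndOffset == 0 is the final one (its end offset is unknown; see SetBoundaries),
+ // so it matches anything at or past its start -- except for prolog entries, which are excluded from
+ // this open-ended match. Otherwise the offset must fall strictly before the entry's end.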
+ if (nativeOffset >= m->nativeStartOffset
+ && ((m->nativeEndOffset == 0 &&
+ m->ilOffset != (ULONG) ICorDebugInfo::PROLOG)
+ || nativeOffset < m->nativeEndOffset))
+ {
+ ULONG ilOff = m->ilOffset;
+
+ if( m->ilOffset == (ULONG) ICorDebugInfo::PROLOG )
+ {
+ if (skipPrologs && nativeOffset < m->nativeEndOffset)
+ {
+ // If the caller requested to skip prologs, we simply restart the walk
+ // with the offset set to the end of the prolog.
+ m = GetSequenceMap();
+ nativeOffset = m->nativeEndOffset;
+ continue;
+ }
+
+ ilOff = 0;
+ (*map) = MAPPING_PROLOG;
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI: MAPPING_PROLOG\n"));
+
+ }
+ else if (m->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING)
+ {
+ ilOff = 0;
+ (*map) = MAPPING_UNMAPPED_ADDRESS ;
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI:MAPPING_"
+ "UNMAPPED_ADDRESS\n"));
+ }
+ else if( m->ilOffset == (ULONG) ICorDebugInfo::EPILOG )
+ {
+ ilOff = m_lastIL;
+ (*map) = MAPPING_EPILOG;
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI:MAPPING_EPILOG\n"));
+ }
+ else if (nativeOffset == m->nativeStartOffset)
+ {
+ (*map) = MAPPING_EXACT;
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI:MAPPING_EXACT\n"));
+ }
+ else
+ {
+ (*map) = MAPPING_APPROXIMATE;
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI:MAPPING_"
+ "APPROXIMATE\n"));
+ }
+
+ return ilOff;
+ }
+ m++;
+ }
+ }
+
+ (*map) = MAPPING_NO_INFO;
+ LOG((LF_CORDB,LL_INFO10000,"DJI::MNOTI:NO_INFO\n"));
+ return 0;
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+DebuggerJitInfo::~DebuggerJitInfo()
+{
+ TRACE_FREE(m_sequenceMap);
+ if (m_sequenceMap != NULL)
+ {
+ DeleteInteropSafe(((BYTE *)m_sequenceMap));
+ }
+
+ TRACE_FREE(m_varNativeInfo);
+ if (m_varNativeInfo != NULL)
+ {
+ DeleteInteropSafe(m_varNativeInfo);
+ }
+
+#if defined(WIN64EXCEPTIONS)
+ if (m_rgFunclet)
+ {
+ DeleteInteropSafe(m_rgFunclet);
+ m_rgFunclet = NULL;
+ }
+#endif // WIN64EXCEPTIONS
+
+
+#ifdef _DEBUG
+ // Trash pointers to garbage.
+ // Don't null out since there may be runtime checks against NULL.
+ // Set to a non-null random pointer value that will cause an immediate AV on deref.
+ m_fd = (MethodDesc*) 0x1;
+ m_methodInfo = (DebuggerMethodInfo*) 0x1;
+ m_prevJitInfo = (DebuggerJitInfo*) 0x01;
+ m_nextJitInfo = (DebuggerJitInfo*) 0x01;
+#endif
+
+
+ LOG((LF_CORDB,LL_EVERYTHING, "DJI::~DJI : deleted at 0x%p\n", this));
+}
+
+
+// Lazy initialize the Debugger-Jit-Info
+void DebuggerJitInfo::LazyInitBounds()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(ThisMaybeHelperThread());
+ PRECONDITION(!g_pDebugger->HasDebuggerDataLock());
+ } CONTRACTL_END;
+
+ //@todo: this method is not synchronized. Mei-chin's recent work should cover this one
+ // Only attempt lazy-init once
+ // new LOG message
+ LOG((LF_CORDB,LL_EVERYTHING, "DJI::LazyInitBounds: this=0x%x m_fAttemptInit %s\n", this, m_fAttemptInit == true? "true": "false"));
+ if (m_fAttemptInit)
+ {
+ return;
+ }
+ m_fAttemptInit = true;
+
+ EX_TRY
+ {
+ LOG((LF_CORDB,LL_EVERYTHING, "DJI::LazyInitBounds: this=0x%x Initing\n", this));
+ // Should have already been jitted
+ _ASSERTE(this->m_jitComplete);
+
+ MethodDesc * mdesc = this->m_fd;
+
+ DebugInfoRequest request;
+
+ _ASSERTE(this->m_addrOfCode != NULL); // must have address to disambiguate the EnC cases.
+ // Caller already resolved generics when they created the DJI, so we don't need to repeat.
+ // Note the MethodDesc may not yet have the jitted info, so we'll also use the starting address we got in the jit complete callback.
+ request.InitFromStartingAddr(mdesc, (PCODE)this->m_addrOfCode);
+
+
+ // Bounds info.
+ ULONG32 cMap = 0;
+ ICorDebugInfo::OffsetMapping *pMap = NULL;
+ ULONG32 cVars = 0;
+ ICorDebugInfo::NativeVarInfo *pVars = NULL;
+
+ BOOL fSuccess = DebugInfoManager::GetBoundariesAndVars(
+ request,
+ InteropSafeNew, NULL, // allocator
+ &cMap, &pMap,
+ &cVars, &pVars);
+ LOG((LF_CORDB,LL_EVERYTHING, "DJI::LazyInitBounds: this=0x%x GetBoundariesAndVars success=0x%x\n", this, fSuccess));
+ if (fSuccess)
+ {
+ this->SetBoundaries(cMap, pMap);
+ this->SetVars(cVars, pVars);
+ }
+ }
+ EX_CATCH
+ {
+ LOG((LF_CORDB,LL_WARNING, "DJI::LazyInitBounds: this=0x%x Exception was thrown and caught\n", this));
+ // Just catch the exception. The DJI maps may or may not be initialized,
+ // but they should still be in a consistent state, so we should be ok.
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
+/******************************************************************************
+ * SetVars() takes ownership of pVars
+ ******************************************************************************/
+void DebuggerJitInfo::SetVars(ULONG32 cVars, ICorDebugInfo::NativeVarInfo *pVars)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_varNativeInfo)
+ {
+ return;
+ }
+
+ m_varNativeInfo = pVars;
+ m_varNativeInfoCount = cVars;
+
+ LOG((LF_CORDB, LL_INFO1000000, "D::sV: var count is %d\n",
+ m_varNativeInfoCount));
+
+#ifdef LOGGING
+ for (unsigned int i = 0; i < m_varNativeInfoCount; i++)
+ {
+ ICorDebugInfo::NativeVarInfo* vni = &(m_varNativeInfo[i]);
+ _dumpVarNativeInfo(vni);
+ }
+#endif
+}
+
+CHECK DebuggerJitInfo::Check() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CHECK_OK;
+}
+
+// Invariants for a DebuggerJitInfo
+// These should always be true at any well defined point.
+CHECK DebuggerJitInfo::Invariant() const
+{
+ LIMITED_METHOD_CONTRACT;
+ CHECK((m_sequenceMapCount == 0) == (m_sequenceMap == NULL));
+ CHECK(m_methodInfo != NULL);
+ CHECK(m_fd != NULL);
+
+ CHECK_OK;
+}
+
+
+#if !defined(DACCESS_COMPILE)
+/******************************************************************************
+ * SetBoundaries() takes ownership of pMap
+ ******************************************************************************/
+void DebuggerJitInfo::SetBoundaries(ULONG32 cMap, ICorDebugInfo::OffsetMapping *pMap)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_EVERYTHING, "DJI::SetBoundaries: this=0x%x cMap=0x%x pMap=0x%x\n", this, cMap, pMap));
+ _ASSERTE((cMap == 0) == (pMap == NULL));
+
+ if (cMap == 0)
+ return;
+
+ if (m_sequenceMap)
+ {
+ return;
+ }
+
+ ULONG ilLast = 0;
+#ifdef _DEBUG
+ // We assume that the map is sorted by native offset
+ if (cMap > 1)
+ {
+ for(ICorDebugInfo::OffsetMapping * pEntry = pMap;
+ pEntry < (pMap + cMap - 1);
+ pEntry++)
+ {
+ _ASSERTE(pEntry->nativeOffset <= (pEntry+1)->nativeOffset);
+ }
+ }
+#endif //_DEBUG
+
+ //
+ // <TODO>@todo perf: allocating these on the heap is slow. We could do
+ // better knowing that these live for the life of the run, just
+ // like the DebuggerJitInfo's.</TODO>
+ //
+ m_sequenceMap = (DebuggerILToNativeMap *)new (interopsafe) DebuggerILToNativeMap[cMap];
+ LOG((LF_CORDB,LL_EVERYTHING, "DJI::SetBoundaries: this=0x%x m_sequenceMap=0x%x\n", this, m_sequenceMap));
+ _ASSERTE(m_sequenceMap != NULL); // we'll throw on null
+
+ m_sequenceMapCount = cMap;
+
+ DebuggerILToNativeMap *m = m_sequenceMap;
+
+ // For the instrumented-IL case, we need to remove all duplicate entries.
+ // So we keep a record of the last old IL offset. If the current old IL
+ // offset is the same as the last old IL offset, we remove it.
+ // Pick a unique initial value (-10) so that the 1st doesn't accidentally match.
+ int ilPrevOld = -10;
+
+ InstrumentedILOffsetMapping mapping =
+ m_methodInfo->GetRuntimeModule()->GetInstrumentedILOffsetMapping(m_methodInfo->m_token);
+
+ //
+ // <TODO>@todo perf: we could do the vast majority of this
+ // post-processing work the first time the sequence point map is
+ // demanded. That would allow us to simply hold the raw array for
+ // 95% of the functions jitted while debugging, and 100% of them
+ // when just running/tracking.</TODO>
+ const DWORD call_inst = (DWORD)ICorDebugInfo::CALL_INSTRUCTION;
+ for(ULONG32 idxJitMap = 0; idxJitMap < cMap; idxJitMap++)
+ {
+ const ICorDebugInfo::OffsetMapping * const pMapEntry = &pMap[idxJitMap];
+ _ASSERTE(m >= m_sequenceMap);
+ _ASSERTE(m < m_sequenceMap + m_sequenceMapCount);
+
+ ilLast = max((int)ilLast, (int)pMapEntry->ilOffset);
+
+ // Simply copy everything over, since we translate to
+ // CorDebugMappingResults immediately prior to handing
+ // back to user...
+ m->nativeStartOffset = pMapEntry->nativeOffset;
+ m->ilOffset = pMapEntry->ilOffset;
+ m->source = pMapEntry->source;
+
+ // Keep in mind that if we have an instrumented code translation
+ // table, we may have asked for completely different IL offsets
+ // than the user thinks we did.....
+
+ // If we did instrument, then we can't have any sequence points that
+ // are "in-between" the old-->new map that the profiler gave us.
+ // Ex, if map is:
+ // (6 old -> 36 new)
+ // (8 old -> 50 new)
+ // And the jit gives us an entry for 44 new, that will map back to 6 old.
+ // Since the map can only have one entry for 6 old, we remove 44 new.
+ if (m_methodInfo->HasInstrumentedILMap())
+ {
+ int ilThisOld = m_methodInfo->TranslateToInstIL(&mapping,
+ pMapEntry->ilOffset,
+ bInstrumentedToOriginal);
+
+ if (ilThisOld == ilPrevOld)
+ {
+ // If this translated to the same old IL offset as the last entry,
+ // then this is "in between". Skip it.
+ m_sequenceMapCount--; // one less seq point in the DJI's map
+ continue;
+ }
+ m->ilOffset = ilThisOld;
+ ilPrevOld = ilThisOld;
+ }
+
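+ // Collapse a duplicate non-call-site entry: if the previous non-CALL_INSTRUCTION entry
+ // (skipping at most one call-site entry in between) maps the same IL offset, drop this one.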
+ if (m > m_sequenceMap && (m->source & call_inst) != call_inst)
+ {
+ DebuggerILToNativeMap *last = m-1;
+ if ((last->source & call_inst) == call_inst)
+ last = (last > m_sequenceMap) ? last - 1 : NULL;
+
+ if (last && (last->source & call_inst) != call_inst && m->ilOffset == last->ilOffset)
+ {
+ // JIT gave us an extra entry (probably zero), so mush
+ // it into the one we've already got.
+ // <TODO> Why does this happen?</TODO>
+ m_sequenceMapCount--;
+ continue;
+ }
+ }
+
+
+ // Move to next entry in the debugger's table
+ m++;
+ } // end for
+
+ DeleteInteropSafe(pMap);
+
+ _ASSERTE(m == m_sequenceMap + m_sequenceMapCount);
+
+ m_lastIL = ilLast;
+
+ // Set nativeEndOffset in debugger's il->native map
+ // Do this before we resort by IL.
+ unsigned int i;
+ for(i = 0; i < m_sequenceMapCount - 1; i++)
+ {
+ // We need to not use CALL_INSTRUCTION's IL start offset.
+ unsigned int j = i + 1;
+ while ((m_sequenceMap[j].source & call_inst) == call_inst && j < m_sequenceMapCount-1)
+ j++;
+
+ m_sequenceMap[i].nativeEndOffset = m_sequenceMap[j].nativeStartOffset;
+ }
+
+ m_sequenceMap[i].nativeEndOffset = 0;
+ m_sequenceMap[i].source = (ICorDebugInfo::SourceTypes)
+ ((DWORD) m_sequenceMap[i].source |
+ (DWORD)ICorDebugInfo::NATIVE_END_OFFSET_UNKNOWN);
+
+    // Now re-sort by IL.
+ MapSortIL isort(m_sequenceMap, m_sequenceMapCount);
+
+ isort.Sort();
+
+ m_sequenceMapSorted = true;
+
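+    // Split the buffer: entries whose source includes CALL_INSTRUCTION are expected to
+    // have been sorted to the end of the array, so the loop below peels them off into a
+    // separate call-site map that begins right after the remaining sequence points.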
+ m_callsiteMapCount = m_sequenceMapCount;
+ while (m_sequenceMapCount > 0 && (m_sequenceMap[m_sequenceMapCount-1].source & call_inst) == call_inst)
+ m_sequenceMapCount--;
+
+ m_callsiteMap = m_sequenceMap + m_sequenceMapCount;
+ m_callsiteMapCount -= m_sequenceMapCount;
+
+ LOG((LF_CORDB, LL_INFO100000, "DJI::SetBoundaries: this=0x%x boundary count is %d (%d callsites)\n",
+ this, m_sequenceMapCount, m_callsiteMapCount));
+
+#ifdef LOGGING
+ for (unsigned int count = 0; count < m_sequenceMapCount + m_callsiteMapCount; count++)
+ {
+ if( m_sequenceMap[count].ilOffset ==
+ (ULONG) ICorDebugInfo::PROLOG )
+ LOG((LF_CORDB, LL_INFO1000000,
+ "D::sB: PROLOG --> 0x%08x -- 0x%08x",
+ m_sequenceMap[count].nativeStartOffset,
+ m_sequenceMap[count].nativeEndOffset));
+ else if ( m_sequenceMap[count].ilOffset ==
+ (ULONG) ICorDebugInfo::EPILOG )
+ LOG((LF_CORDB, LL_INFO1000000,
+ "D::sB: EPILOG --> 0x%08x -- 0x%08x",
+ m_sequenceMap[count].nativeStartOffset,
+ m_sequenceMap[count].nativeEndOffset));
+ else if ( m_sequenceMap[count].ilOffset ==
+ (ULONG) ICorDebugInfo::NO_MAPPING )
+ LOG((LF_CORDB, LL_INFO1000000,
+ "D::sB: NO MAP --> 0x%08x -- 0x%08x",
+ m_sequenceMap[count].nativeStartOffset,
+ m_sequenceMap[count].nativeEndOffset));
+ else
+ LOG((LF_CORDB, LL_INFO1000000,
+ "D::sB: 0x%04x (Real:0x%04x) --> 0x%08x -- 0x%08x",
+ m_sequenceMap[count].ilOffset,
+ m_methodInfo->TranslateToInstIL(&mapping,
+ m_sequenceMap[count].ilOffset,
+ bOriginalToInstrumented),
+ m_sequenceMap[count].nativeStartOffset,
+ m_sequenceMap[count].nativeEndOffset));
+
+ LOG((LF_CORDB, LL_INFO1000000, " Src:0x%x\n", m_sequenceMap[count].source));
+
+ }
+#endif //LOGGING
+}
+#endif // !DACCESS_COMPILE
+
+// Init a DJI after it's jitted.
+void DebuggerJitInfo::Init(TADDR newAddress)
+{
+    // Shouldn't initialize while holding the lock b/c initializing may call functions that lock,
+ // and thus we'd have a locking violation.
+ _ASSERTE(!g_pDebugger->HasDebuggerDataLock());
+
+ this->m_addrOfCode = (ULONG_PTR)PTR_TO_CORDB_ADDRESS((BYTE*) newAddress);
+ this->m_jitComplete = true;
+
+ this->m_codeRegionInfo.InitializeFromStartAddress((PCODE)this->m_addrOfCode);
+ this->m_sizeOfCode = this->m_codeRegionInfo.getSizeOfTotalCode();
+
+ this->m_encVersion = this->m_methodInfo->GetCurrentEnCVersion();
+
+#if defined(WIN64EXCEPTIONS)
+ this->InitFuncletAddress();
+#endif // WIN64EXCEPTIONS
+
+ LOG((LF_CORDB,LL_INFO10000,"De::JITCo:Got DJI 0x%p(V %d),"
+ "Hot section from 0x%p to 0x%p "
+ "Cold section from 0x%p to 0x%p "
+        "code from 0x%x to 0x%x varCount=%d seqCount=%d\n",
+ this, this->m_encVersion,
+ this->m_codeRegionInfo.getAddrOfHotCode(),
+ this->m_codeRegionInfo.getAddrOfHotCode() + this->m_codeRegionInfo.getSizeOfHotCode(),
+ this->m_codeRegionInfo.getAddrOfColdCode(),
+ this->m_codeRegionInfo.getAddrOfColdCode() + this->m_codeRegionInfo.getSizeOfColdCode(),
+ (ULONG)this->m_addrOfCode,
+ (ULONG)this->m_addrOfCode+(ULONG)this->m_sizeOfCode,
+ this->GetVarNativeInfoCount(),
+ this->GetSequenceMapCount()));
+
+#if defined(LOGGING)
+ for (unsigned int i = 0; i < this->GetSequenceMapCount(); i++)
+ {
+ LOG((LF_CORDB, LL_INFO10000, "De::JITCo: seq map 0x%x - "
+ "IL offset 0x%x native start offset 0x%x native end offset 0x%x source 0x%x\n",
+ i, this->GetSequenceMap()[i].ilOffset,
+ this->GetSequenceMap()[i].nativeStartOffset,
+ this->GetSequenceMap()[i].nativeEndOffset,
+ this->GetSequenceMap()[i].source));
+ }
+#endif // LOGGING
+
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+ICorDebugInfo::SourceTypes DebuggerJitInfo::GetSrcTypeFromILOffset(SIZE_T ilOffset)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ BOOL exact = FALSE;
+ DebuggerILToNativeMap *pMap = MapILOffsetToMapEntry(ilOffset, &exact);
+
+ LOG((LF_CORDB, LL_INFO100000, "DJI::GSTFILO: for il 0x%x, got entry 0x%p,"
+ "(il 0x%x) nat 0x%x to 0x%x, SourceTypes 0x%x, exact:%x\n", ilOffset, pMap,
+ pMap->ilOffset, pMap->nativeStartOffset, pMap->nativeEndOffset, pMap->source,
+ exact));
+
+ if (!exact)
+ {
+ return ICorDebugInfo::SOURCE_TYPE_INVALID;
+ }
+
+ return pMap->source;
+}
+
+/******************************************************************************
+ *
+ ******************************************************************************/
+DebuggerMethodInfo::~DebuggerMethodInfo()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ DESTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ DeleteJitInfoList();
+
+ LOG((LF_CORDB,LL_EVERYTHING, "DMI::~DMI : deleted at 0x%p\n", this));
+}
+
+// Translate between old & new offsets (w/ respect to Instrumented IL).
+
+// Don't interpolate
+ULONG32 DebuggerMethodInfo::TranslateToInstIL(const InstrumentedILOffsetMapping * pMapping,
+ ULONG32 offOrig,
+ bool fOrigToInst)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ SIZE_T iMap;
+ SIZE_T cMap = pMapping->GetCount();
+ // some negative IL offsets have special meaning. Don't translate
+ // those (just return as is). See ICorDebugInfo::MappingTypes
+    if ((cMap == 0) || ((int)offOrig < 0))
+ {
+ return offOrig;
+ }
+
+ ARRAY_PTR_COR_IL_MAP rgMap = pMapping->GetOffsets();
+
+ // This assumes:
+ // - map is sorted in increasing order by both old & new
+ // - round down.
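+    //
+    // Illustrative example (values chosen only for exposition): with map entries
+    // { old 6 -> new 36, old 8 -> new 50 }, translating old offset 7 in the orig->inst
+    // direction rounds down to the entry for old 6 and yields new 36; translating new
+    // offset 44 in the inst->orig direction likewise rounds down and yields old 6.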
+ if (fOrigToInst)
+ {
+ // Translate: old --> new
+
+ // Treat it as prolog if offOrig is not in remapping range
+ if ((offOrig < rgMap[0].oldOffset) || (offOrig == (ULONG32)ICorDebugInfo::PROLOG))
+ {
+ return (ULONG32)ICorDebugInfo::PROLOG;
+ }
+
+ if (offOrig == (ULONG32)ICorDebugInfo::EPILOG)
+ {
+ return (ULONG32)ICorDebugInfo::EPILOG;
+ }
+
+ if (offOrig == (ULONG32)ICorDebugInfo::NO_MAPPING)
+ {
+ return (ULONG32)ICorDebugInfo::NO_MAPPING;
+ }
+
+ for(iMap = 1; iMap < cMap; iMap++)
+ {
+ if (offOrig < rgMap[iMap].oldOffset)
+ return rgMap[iMap-1].newOffset;
+ }
+
+ return rgMap[iMap - 1].newOffset;
+ }
+ else
+ {
+ // Translate: new --> old
+
+ // Treat it as prolog if offOrig is not in remapping range
+ if ((offOrig < rgMap[0].newOffset) || (offOrig == (ULONG32)ICorDebugInfo::PROLOG))
+ {
+ return (ULONG32)ICorDebugInfo::PROLOG;
+ }
+
+ if (offOrig == (ULONG32)ICorDebugInfo::EPILOG)
+ {
+ return (ULONG32)ICorDebugInfo::EPILOG;
+ }
+
+ if (offOrig == (ULONG32)ICorDebugInfo::NO_MAPPING)
+ {
+ return (ULONG32)ICorDebugInfo::NO_MAPPING;
+ }
+
+ for(iMap = 1; iMap < cMap; iMap++)
+ {
+ if (offOrig < rgMap[iMap].newOffset)
+ return rgMap[iMap-1].oldOffset;
+ }
+
+ return rgMap[iMap - 1].oldOffset;
+ }
+}
+
+/******************************************************************************
+ * Constructor for DebuggerMethodInfo
+ ******************************************************************************/
+DebuggerMethodInfo::DebuggerMethodInfo(Module *module, mdMethodDef token) :
+ m_currentEnCVersion(CorDB_DEFAULT_ENC_FUNCTION_VERSION),
+ m_module(module),
+ m_token(token),
+ m_prevMethodInfo(NULL),
+ m_nextMethodInfo(NULL),
+ m_latestJitInfo(NULL),
+ m_fHasInstrumentedILMap(false)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ WRAPPER(THROWS);
+ WRAPPER(GC_TRIGGERS);
+ CONSTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_EVERYTHING, "DMI::DMI : created at 0x%p\n", this));
+
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+
+ DebuggerModule * pModule = GetPrimaryModule();
+
+ m_fJMCStatus = false;
+
+ // If there's no module, then this isn't a JMC function.
+ // This can happen since DMIs are created for debuggable code, and
+ // Modules are only created if a debugger is actually attached.
+ if (pModule != NULL)
+ {
+ // Use the accessor so that we keep the module's count properly updated.
+ SetJMCStatus(pModule->GetRuntimeModule()->GetJMCStatus());
+ }
+}
+
+
+/******************************************************************************
+ * Get the primary debugger module for this DMI. This is 1:1 w/ an EE Module.
+ ******************************************************************************/
+DebuggerModule* DebuggerMethodInfo::GetPrimaryModule()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+
+ DebuggerModuleTable * pTable = g_pDebugger->GetModuleTable();
+
+ // If we're tracking debug info but no debugger's attached, then
+ // we won't have a table for the modules yet.
+ if (pTable == NULL)
+ return NULL;
+
+ DebuggerModule * pModule = pTable->GetModule(GetRuntimeModule());
+ if (pModule == NULL)
+ {
+ // We may be missing the module even if we have the table.
+ // 1.) If there's no debugger attached (so we're not getting ModuleLoad events).
+ // 2.) If we're asking for this while in DllMain of the module we're currently loading,
+ // we won't have gotten the ModuleLoad event yet.
+ return NULL;
+ }
+
+ // Only give back primary modules...
+ DebuggerModule * p2 = pModule->GetPrimaryModule();
+ _ASSERTE(p2 != NULL);
+
+ return p2;
+}
+
+/******************************************************************************
+ * Get the runtime module for this DMI
+ ******************************************************************************/
+Module * DebuggerMethodInfo::GetRuntimeModule()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_module;
+}
+
+#endif // !DACCESS_COMPILE
+
+
+//---------------------------------------------------------------------------------------
+//
+// Find the DebuggerJitInfo (DJI) for the given MethodDesc and native start address.
+// We need the native start address because generic methods may have multiple instances
+// of jitted code. This function does not create the DJI if it does not already exist.
+//
+// Arguments:
+// pMD - the MD to lookup; must be non-NULL
+// addrNativeStartAddr - the native start address of jitted code
+//
+// Return Value:
+// Returns the DJI corresponding to the specified MD and native start address.
+// NULL if the DJI is not found.
+//
+
+DebuggerJitInfo * DebuggerMethodInfo::FindJitInfo(MethodDesc * pMD,
+ TADDR addrNativeStartAddr)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ SUPPORTS_DAC;
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(pMD != NULL);
+ }
+ CONTRACTL_END;
+
+
+ DebuggerJitInfo * pCheck = m_latestJitInfo;
+ while (pCheck != NULL)
+ {
+ if ( (pCheck->m_fd == dac_cast<PTR_MethodDesc>(pMD)) &&
+ (pCheck->m_addrOfCode == addrNativeStartAddr) )
+ {
+ return pCheck;
+ }
+
+ pCheck = pCheck->m_prevJitInfo;
+ }
+
+ return NULL;
+}
+
+
+#if !defined(DACCESS_COMPILE)
+
+/*
+ * FindOrCreateInitAndAddJitInfo
+ *
+ * This routine allocates a new DJI, adding it to the DMI.
+ *
+ * Parameters:
+ * fd - the method desc to create a DJI for.
+ *
+ * Returns
+ * A pointer to the created DJI, or NULL.
+ *
+ */
+
+DebuggerJitInfo *DebuggerMethodInfo::FindOrCreateInitAndAddJitInfo(MethodDesc* fd)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(fd != NULL);
+
+ // This will grab the latest EnC version.
+ TADDR addr = (TADDR) g_pEEInterface->GetFunctionAddress(fd);
+
+ if (addr == NULL)
+ return NULL;
+
+    // Check the list to see if we've already populated an entry for this JitInfo.
+ // If we didn't have a JitInfo before, lazily create it now.
+ // We don't care if we were prejitted or not.
+ //
+ // We haven't got the lock yet so we'll repeat this lookup once
+ // we've taken the lock.
+ DebuggerJitInfo * pResult = FindJitInfo(fd, addr);
+ if (pResult != NULL)
+ {
+ // Found!
+ return pResult;
+ }
+
+
+ // CreateInitAndAddJitInfo takes a lock and checks the list again, which
+ // makes this thread-safe.
+ return CreateInitAndAddJitInfo(fd, addr);
+}
+
+// Create a DJI around a method-desc. The EE already has all the information we need for a DJI,
+// the DJI just serves as a cache of the information for the debugger.
+// The caller makes no guarantees about whether the DJI is already in the table. (Callers should avoid
+// calling this if they know the DJI is already in the table, but we can't expect them to synchronize with other threads.)
+DebuggerJitInfo *DebuggerMethodInfo::CreateInitAndAddJitInfo(MethodDesc* fd, TADDR startAddr)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(!g_pDebugger->HasDebuggerDataLock());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(fd != NULL);
+
+    // May or may not be jitted; that's why we passed in the start address explicitly.
+ _ASSERTE(startAddr != NULL);
+
+
+ // No support for light-weight codegen methods.
+ if (fd->IsDynamicMethod())
+ {
+ return NULL;
+ }
+
+
+ DebuggerJitInfo *dji = new (interopsafe) DebuggerJitInfo(this, fd);
+ _ASSERTE(dji != NULL); // throws on oom error
+
+ _ASSERTE(dji->m_methodInfo == this); // this should be set
+
+ TRACE_ALLOC(dji);
+
+ // Init may take locks that violate the debugger-data lock, so we can't init while we hold that lock.
+ // But we can't init after we add it to the table and release the lock b/c another thread may pick
+ // if up in the uninitialized state.
+ // So we initialize a private copy of the DJI before we take the debugger-data lock.
+ dji->Init(startAddr);
+
+ dji->m_nextJitInfo = NULL;
+
+ //
+ //<TODO>@TODO : _ASSERTE(EnC);</TODO>
+ //
+ {
+ Debugger::DebuggerDataLockHolder debuggerDataLockHolder(g_pDebugger);
+
+        // Ensure that another thread didn't go in and add this exact same DJI.
+ {
+ DebuggerJitInfo * pResult = FindJitInfo(dji->m_fd, (TADDR)dji->m_addrOfCode);
+ if (pResult != NULL)
+ {
+ // Found!
+ _ASSERTE(pResult->m_sizeOfCode == dji->m_sizeOfCode);
+ DeleteInteropSafe(dji);
+ return pResult;
+ }
+ }
+
+ // We know it's not in the table. Go add it!
+ DebuggerJitInfo *djiPrev = m_latestJitInfo;
+
+ LOG((LF_CORDB,LL_INFO10000,"DMI:CAAJI: current head of dji list:0x%08x\n", djiPrev));
+
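+        // The DJI list is doubly-linked with the newest entry at the head
+        // (m_latestJitInfo); m_prevJitInfo walks toward older entries and
+        // m_nextJitInfo toward newer ones.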
+ if (djiPrev != NULL)
+ {
+ dji->m_prevJitInfo = djiPrev;
+ djiPrev->m_nextJitInfo = dji;
+
+ m_latestJitInfo = dji;
+
+ LOG((LF_CORDB,LL_INFO10000,"DMI:CAAJI: DJI version 0x%04x for %s\n",
+ GetCurrentEnCVersion(),
+ dji->m_fd->m_pszDebugMethodName));
+ }
+ else
+ {
+ m_latestJitInfo = dji;
+ }
+
+ } // DebuggerDataLockHolder out of scope - release implied
+
+ // We've now added a new DJI into the table and released the lock. Thus any other thread
+ // can come and use our DJI. Good thing we inited the DJI _before_ adding it to the table.
+
+ LOG((LF_CORDB,LL_INFO10000,"DMI:CAAJI: new head of dji list:0x%08x\n", m_latestJitInfo));
+
+ return dji;
+}
+
+/*
+ * DeleteJitInfo
+ *
+ * This routine removes a DJI from the DMI's list and deletes the memory.
+ *
+ * Parameters:
+ * dji - The DJI to delete.
+ *
+ * Returns
+ * None.
+ *
+ */
+
+void DebuggerMethodInfo::DeleteJitInfo(DebuggerJitInfo *dji)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Debugger::DebuggerDataLockHolder debuggerDataLockHolder(g_pDebugger);
+
+ LOG((LF_CORDB,LL_INFO10000,"DMI:DJI: dji:0x%08x\n", dji));
+
+ DebuggerJitInfo *djiPrev = dji->m_prevJitInfo;
+
+ if (djiPrev != NULL)
+ {
+ djiPrev->m_nextJitInfo = dji->m_nextJitInfo;
+ }
+
+ if (dji->m_nextJitInfo != NULL)
+ {
+ dji->m_nextJitInfo->m_prevJitInfo = djiPrev;
+ }
+ else
+ {
+ //
+ // This DJI is the head of the list
+ //
+ _ASSERTE(m_latestJitInfo == dji);
+
+ m_latestJitInfo = djiPrev;
+ }
+
+ TRACE_FREE(dji);
+
+ DeleteInteropSafe(dji);
+
+ // DebuggerDataLockHolder out of scope - release implied
+}
+
+/*
+ * DeleteJitInfoList
+ *
+ * This routine removes all the DJIs from the current DMI.
+ *
+ * Parameters:
+ * None.
+ *
+ * Returns
+ * None.
+ *
+ */
+
+void DebuggerMethodInfo::DeleteJitInfoList(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ Debugger::DebuggerDataLockHolder debuggerDataLockHolder(g_pDebugger);
+
+ while(m_latestJitInfo != NULL)
+ {
+ DeleteJitInfo(m_latestJitInfo);
+ }
+
+ // DebuggerDataLockHolder out of scope - release implied
+}
+
+
+// Iterate through all existing DJIs. See header for expected usage.
+DebuggerMethodInfo::DJIIterator::DJIIterator()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pCurrent = NULL;
+ m_pLoaderModuleFilter = NULL;
+}
+
+bool DebuggerMethodInfo::DJIIterator::IsAtEnd()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pCurrent == NULL;
+}
+
+DebuggerJitInfo * DebuggerMethodInfo::DJIIterator::Current()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pCurrent;
+}
+
+void DebuggerMethodInfo::DJIIterator::Next(BOOL fFirst /*=FALSE*/)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ CANNOT_TAKE_LOCK;
+ }
+ CONTRACTL_END;
+
+ if (!fFirst)
+ {
+ PREFIX_ASSUME(m_pCurrent != NULL); // IsAtEnd() should have caught this.
+ m_pCurrent = m_pCurrent->m_prevJitInfo;
+ }
+
+ // Check if we're at the end of the list, in which case we're done.
+ for ( ; m_pCurrent != NULL; m_pCurrent = m_pCurrent->m_prevJitInfo)
+ {
+ Module * pLoaderModule = m_pCurrent->m_pLoaderModule;
+
+ // Obey the module filter if it's provided
+ if ((m_pLoaderModuleFilter != NULL) && (m_pLoaderModuleFilter != pLoaderModule))
+ continue;
+
+ // Skip modules that are unloaded, but still hanging around. Note that we can't use DebuggerModule for this check
+        // because it is deleted pretty early during unloading, and we do not want to recreate it.
+ if (pLoaderModule->GetLoaderAllocator()->IsUnloaded())
+ continue;
+
+ break;
+ }
+}
+
+
+/******************************************************************************
+ * Return true iff this method is jitted
+ ******************************************************************************/
+bool DebuggerMethodInfo::HasJitInfos()
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+ return (m_latestJitInfo != NULL);
+}
+
+/******************************************************************************
+ * Return true iff this has been EnCed since last time function was jitted.
+ ******************************************************************************/
+bool DebuggerMethodInfo::HasMoreRecentEnCVersion()
+{
+ LIMITED_METHOD_CONTRACT;
+ return ((m_latestJitInfo != NULL) &&
+ (m_currentEnCVersion > m_latestJitInfo->m_encVersion));
+}
+
+/******************************************************************************
+ * Update the instrumented-IL map
+ ******************************************************************************/
+void DebuggerMethodInfo::SetInstrumentedILMap(COR_IL_MAP * pMap, SIZE_T cEntries)
+{
+ InstrumentedILOffsetMapping mapping;
+ mapping.SetMappingInfo(cEntries, pMap);
+
+ GetRuntimeModule()->SetInstrumentedILOffsetMapping(m_token, mapping);
+
+ m_fHasInstrumentedILMap = true;
+}
+
+/******************************************************************************
+ * Get the JMC status for a given function.
+ ******************************************************************************/
+bool DebuggerMethodInfo::IsJMCFunction()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_fJMCStatus;
+}
+
+/******************************************************************************
+ * Set the JMC status to a given value
+ ******************************************************************************/
+void DebuggerMethodInfo::SetJMCStatus(bool fStatus)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+
+ // First check if this is a no-op.
+ // Do this first b/c there may be some cases where we don't have a DebuggerModule
+ // yet but are still calling SetJMCStatus(false), like if we detach before attach is complete.
+ bool fOldStatus = IsJMCFunction();
+
+ if (fOldStatus == fStatus)
+ {
+ // if no change, then there's nothing to do.
+ LOG((LF_CORDB,LL_EVERYTHING, "DMI::SetJMCStatus: %p, keeping old status, %d\n", this, fStatus));
+ return;
+ }
+
+ // For a perf-optimization, our Module needs to know if it has any user
+ // code. If it doesn't, it shouldn't dispatch through the JMC probes.
+ // So modules keep a count of # of JMC functions - if the count is 0, the
+    // module can set its JMC probe flag to 0 and skip the JMC probes.
+ Module * pRuntimeModule = this->GetRuntimeModule();
+
+ // Update the module's count.
+ if (!fStatus)
+ {
+ LOG((LF_CORDB,LL_EVERYTHING, "DMI::SetJMCStatus: %p, changing to non-user code\n", this));
+ _ASSERTE(pRuntimeModule->HasAnyJMCFunctions());
+ pRuntimeModule->DecJMCFuncCount();
+ }
+ else
+ {
+ LOG((LF_CORDB,LL_EVERYTHING, "DMI::SetJMCStatus: %p, changing to user code\n", this));
+ pRuntimeModule->IncJMCFuncCount();
+ _ASSERTE(pRuntimeModule->HasAnyJMCFunctions());
+ }
+
+ m_fJMCStatus = fStatus;
+
+ // We should update our module's JMC status...
+ g_pDebugger->UpdateModuleJMCFlag(pRuntimeModule, DebuggerController::GetTotalMethodEnter() != 0);
+
+}
+
+// Get an iterator that will go through ALL native code-blobs (DJI) in the specified
+// AppDomain, optionally filtered by loader module (if pLoaderModuleFilter != NULL).
+// This is EnC/ Generics / Prejit aware.
+void DebuggerMethodInfo::IterateAllDJIs(AppDomain * pAppDomain, Module * pLoaderModuleFilter, DebuggerMethodInfo::DJIIterator * pEnum)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pEnum != NULL);
+ _ASSERTE(pAppDomain != NULL);
+
+    // Ensure we have DJIs for everything.
+ CreateDJIsForNativeBlobs(pAppDomain, pLoaderModuleFilter);
+
+ pEnum->m_pCurrent = m_latestJitInfo;
+ pEnum->m_pLoaderModuleFilter = pLoaderModuleFilter;
+
+ // Advance to the first DJI that passes the filter
+ pEnum->Next(TRUE);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Bring the DJI cache up to date.
+//
+// Arguments:
+// * pAppDomain - Create DJIs only for this AppDomain
+// * pLoaderModuleFilter - If non-NULL, create DJIs only for MethodDescs whose
+// loader module matches this one. (This can be different from m_module in the
+// case of generics defined in one module and instantiated in another). If
+//         NULL, create DJIs for all modules in pAppDomain.
+//
+
+void DebuggerMethodInfo::CreateDJIsForNativeBlobs(AppDomain * pAppDomain, Module * pLoaderModuleFilter /* = NULL */)
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // If we're not stopped and the module we're iterating over allows types to load,
+ // then it's possible new native blobs are being created underneath us.
+ _ASSERTE(g_pDebugger->IsStopped() || ((pLoaderModuleFilter != NULL) && !pLoaderModuleFilter->IsReadyForTypeLoad()));
+
+ // @todo - we really only need to do this if the stop-counter goes up (else we know nothing new is added).
+ // B/c of generics, it's possible that new instantiations of a method may have been jitted.
+ // So just loop through all known instantiations and ensure that we have all the DJIs.
+ // Note that this iterator won't show previous EnC versions, but we're already guaranteed to
+    // have DJIs for every version of a method that was EnCed.
+ // This also handles the possibility of getting the same methoddesc back from the iterator.
+    // It also lets EnC + generics play nice together (including if a generic method was EnC-ed).
+ LoadedMethodDescIterator it(pAppDomain, m_module, m_token);
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (it.Next(pDomainAssembly.This()))
+ {
+ MethodDesc * pDesc = it.Current();
+ if (!pDesc->HasNativeCode())
+ {
+ continue;
+ }
+
+ Module * pLoaderModule = pDesc->GetLoaderModule();
+
+ // Obey the module filter if it's provided
+ if ((pLoaderModuleFilter != NULL) && (pLoaderModuleFilter != pLoaderModule))
+ continue;
+
+ // Skip modules that are unloaded, but still hanging around. Note that we can't use DebuggerModule for this check
+        // because it is deleted pretty early during unloading, and we do not want to recreate it.
+ if (pLoaderModule->GetLoaderAllocator()->IsUnloaded())
+ continue;
+
+ // We just ask for the DJI to ensure that it's lazily created.
+ // This should only fail in an oom scenario.
+ DebuggerJitInfo * djiTest = g_pDebugger->GetLatestJitInfoFromMethodDesc(pDesc);
+ if (djiTest == NULL)
+ {
+ // We're oom. Give up.
+ ThrowOutOfMemory();
+ return;
+ }
+ }
+}
+
+/*
+ * GetLatestJitInfo
+ *
+ * This routine returns the latest DJI we have for a particular DMI.
+ * DJIs are lazily created.
+ * Parameters:
+ * None.
+ *
+ * Returns
+ * a possibly NULL pointer to a DJI.
+ *
+ */
+
+// For logging and other internal purposes, provide a non-initializing accessor.
+DebuggerJitInfo* DebuggerMethodInfo::GetLatestJitInfo_NoCreate()
+{
+ return m_latestJitInfo;
+}
+
+
+DebuggerMethodInfoTable::DebuggerMethodInfoTable() : CHashTableAndData<CNewZeroData>(101)
+{
+ CONTRACTL
+ {
+ WRAPPER(THROWS);
+ GC_NOTRIGGER;
+
+ CONSTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ HRESULT hr = NewInit(101, sizeof(DebuggerMethodInfoEntry), 101);
+
+ if (FAILED(hr))
+ {
+ ThrowWin32(hr);
+ }
+}
+
+HRESULT DebuggerMethodInfoTable::AddMethodInfo(Module *pModule,
+ mdMethodDef token,
+ DebuggerMethodInfo *mi)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+
+ INSTANCE_CHECK;
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(mi));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "DMIT::AMI Adding dmi:0x%x Mod:0x%x tok:"
+ "0x%x nVer:0x%x\n", mi, pModule, token, mi->GetCurrentEnCVersion()));
+
+ _ASSERTE(mi != NULL);
+
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+
+ HRESULT hr = OverwriteMethodInfo(pModule, token, mi, TRUE);
+ if (hr == S_OK)
+ return hr;
+
+ DebuggerMethodInfoKey dmik;
+ dmik.pModule = pModule;
+ dmik.token = token;
+
+ DebuggerMethodInfoEntry *dmie =
+ (DebuggerMethodInfoEntry *) Add(HASH(&dmik));
+
+ if (dmie != NULL)
+ {
+ dmie->key.pModule = pModule;
+ dmie->key.token = token;
+ dmie->mi = mi;
+
+        LOG((LF_CORDB, LL_INFO1000, "DMIT::AMI: mod:0x%x tok:0x%x ",
+ pModule, token));
+ return S_OK;
+ }
+
+ ThrowOutOfMemory();
+ return S_OK;
+}
+
+HRESULT DebuggerMethodInfoTable::OverwriteMethodInfo(Module *pModule,
+ mdMethodDef token,
+ DebuggerMethodInfo *mi,
+ BOOL fOnlyIfNull)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(mi));
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "DMIT::OJI: dmi:0x%x mod:0x%x tok:0x%x\n", mi,
+ pModule, token));
+
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+
+ DebuggerMethodInfoKey dmik;
+ dmik.pModule = pModule;
+ dmik.token = token;
+
+ DebuggerMethodInfoEntry *entry
+ = (DebuggerMethodInfoEntry *) Find(HASH(&dmik), KEY(&dmik));
+ if (entry != NULL)
+ {
+ if ( (fOnlyIfNull &&
+ entry->mi == NULL) ||
+ !fOnlyIfNull)
+ {
+ entry->mi = mi;
+
+ LOG((LF_CORDB, LL_INFO1000, "DMIT::OJI: mod:0x%x tok:0x%x remap"
+ "nVer:0x%x\n", pModule, token, entry->nVersionLastRemapped));
+ return S_OK;
+ }
+ }
+
+ return E_FAIL;
+}
+
+// pModule is being destroyed - remove any entries that belong to it. Why?
+// (a) Correctness: the module can be reloaded at the same address,
+// which will cause accidental matches with our hashtable (indexed by
+// {Module*,mdMethodDef}
+// (b) Perf: don't waste the memory!
+void DebuggerMethodInfoTable::ClearMethodsOfModule(Module *pModule)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+
+ LOG((LF_CORDB, LL_INFO1000000, "CMOM:mod:0x%x (%S)\n", pModule
+ ,pModule->GetDebugName()));
+
+ HASHFIND info;
+
+ DebuggerMethodInfoEntry *entry
+ = (DebuggerMethodInfoEntry *) FindFirstEntry(&info);
+ while(entry != NULL)
+ {
+ Module *pMod = entry->key.pModule ;
+ if (pMod == pModule)
+ {
+            // This method actually got jitted, at least
+ // once - remove all version info.
+ while(entry->mi != NULL)
+ {
+ DeleteEntryDMI(entry);
+ }
+
+ Delete(HASH(&(entry->key)), (HASHENTRY*)entry);
+ }
+ else
+ {
+ //
+ // Delete generic DJIs that have lifetime attached to this module
+ //
+ DebuggerMethodInfo * dmi = entry->mi;
+ while (dmi != NULL)
+ {
+ DebuggerJitInfo * dji = dmi->GetLatestJitInfo_NoCreate();
+ while (dji != NULL)
+ {
+                    DebuggerJitInfo * djiPrev = dji->m_prevJitInfo;
+
+ if (dji->m_pLoaderModule == pModule)
+ dmi->DeleteJitInfo(dji);
+
+ dji = djiPrev;
+ }
+
+ dmi = dmi->m_prevMethodInfo;
+ }
+ }
+
+ entry = (DebuggerMethodInfoEntry *) FindNextEntry(&info);
+ }
+}
+
+void DebuggerMethodInfoTable::DeleteEntryDMI(DebuggerMethodInfoEntry *entry)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK; // DeleteInteropSafe() eventually calls DebuggerMethodInfo::DeleteJitInfoList
+ // which locks.
+ }
+ CONTRACTL_END;
+
+ DebuggerMethodInfo *dmiPrev = entry->mi->m_prevMethodInfo;
+ TRACE_FREE(entry->mi);
+ DeleteInteropSafe(entry->mi);
+ entry->mi = dmiPrev;
+ if ( dmiPrev != NULL )
+ dmiPrev->m_nextMethodInfo = NULL;
+}
+
+#endif // #ifndef DACCESS_COMPILE
+
+DebuggerJitInfo *DebuggerJitInfo::GetJitInfoByAddress(const BYTE *pbAddr )
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ DebuggerJitInfo *dji = this;
+
+#ifdef LOGGING
+ LOG((LF_CORDB,LL_INFO10000,"DJI:GJIBA finding DJI "
+ "corresponding to addr 0x%p, starting with 0x%p\n", pbAddr, dji));
+#endif //LOGGING
+
+ // If it's not NULL, but not in the range m_addrOfCode to end of function,
+ // then get the previous one.
+ while( dji != NULL &&
+ !CodeRegionInfo::GetCodeRegionInfo(dji).IsMethodAddress(pbAddr))
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DJI:GJIBA: pbAddr 0x%p is not in code "
+ "0x%p (size:0x%p)\n", pbAddr, dji->m_addrOfCode,
+ dji->m_sizeOfCode));
+ dji = dji->m_prevJitInfo;
+ }
+
+#ifdef LOGGING
+ if (dji == NULL)
+ {
+ LOG((LF_CORDB,LL_INFO10000,"DJI:GJIBA couldn't find a DJI "
+ "corresponding to addr 0x%p\n", pbAddr));
+ }
+#endif //LOGGING
+ return dji;
+}
+
+PTR_DebuggerJitInfo DebuggerMethodInfo::GetLatestJitInfo(MethodDesc *mdesc)
+{
+ // dac checks ngen'ed image content first, so
+ // only check for existing JIT info.
+#ifndef DACCESS_COMPILE
+
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT;
+ PRECONDITION(!g_pDebugger->HasDebuggerDataLock());
+ }
+ CONTRACTL_END;
+
+
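+    // Fast path: for a non-generic method the head of the DJI list is the only
+    // instantiation, so it can be returned directly. Generic methods share a DMI across
+    // instantiations, so fall through and look up (or create) the DJI for this
+    // particular MethodDesc.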
+ if (m_latestJitInfo && m_latestJitInfo->m_fd == mdesc && !m_latestJitInfo->m_fd->HasClassOrMethodInstantiation())
+ return m_latestJitInfo;
+
+ // This ensures that there is an entry in the DJI list for this particular MethodDesc.
+ // in the case of generic code it may not be the first entry in the list.
+ FindOrCreateInitAndAddJitInfo(mdesc);
+
+#endif // #ifndef DACCESS_COMPILE
+
+ return m_latestJitInfo;
+}
+
+DebuggerMethodInfo *DebuggerMethodInfoTable::GetMethodInfo(Module *pModule, mdMethodDef token)
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ // CHECK_DMI_TABLE;
+
+ // @review. One of the BVTs causes this to be called before the table is initialized
+ // In particular, the changes to BREAKPOINT_ADD mean that this table is now consulted
+ // to determine if we have ever seen the method, rather than a call to LookupMethodDesc,
+ // which would have just returned NULL. In general it seems OK to consult this table
+ // when it is empty, so I've added this....
+ if (this == NULL)
+ return NULL;
+
+ DebuggerMethodInfoKey dmik;
+ dmik.pModule = dac_cast<PTR_Module>(pModule);
+ dmik.token = token;
+
+ DebuggerMethodInfoEntry *entry = dac_cast<PTR_DebuggerMethodInfoEntry>(Find(HASH(&dmik), KEY(&dmik)));
+
+ if (entry == NULL )
+ {
+ return NULL;
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DMI::GMI: for methodDef 0x%x, got 0x%x prev:0x%x\n",
+ token, entry->mi, (entry->mi?entry->mi->m_prevMethodInfo:0)));
+ return entry->mi;
+ }
+}
+
+
+DebuggerMethodInfo *DebuggerMethodInfoTable::GetFirstMethodInfo(HASHFIND *info)
+{
+ CONTRACT(DebuggerMethodInfo*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(CheckPointer(info));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+
+ DebuggerMethodInfoEntry *entry = PTR_DebuggerMethodInfoEntry
+ (PTR_HOST_TO_TADDR(FindFirstEntry(info)));
+ if (entry == NULL)
+ RETURN NULL;
+ else
+ RETURN entry->mi;
+}
+
+DebuggerMethodInfo *DebuggerMethodInfoTable::GetNextMethodInfo(HASHFIND *info)
+{
+ CONTRACT(DebuggerMethodInfo*)
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(CheckPointer(info));
+ POSTCONDITION(CheckPointer(RETVAL, NULL_OK));
+ }
+ CONTRACT_END;
+
+ _ASSERTE(g_pDebugger->HasDebuggerDataLock());
+
+ DebuggerMethodInfoEntry *entry = PTR_DebuggerMethodInfoEntry
+ (PTR_HOST_TO_TADDR(FindNextEntry(info)));
+
+ // We may have incremented the version number
+ // for methods that never got JITted, so we should
+ // pretend like they don't exist here.
+ while (entry != NULL &&
+ entry->mi == NULL)
+ {
+ entry = PTR_DebuggerMethodInfoEntry
+ (PTR_HOST_TO_TADDR(FindNextEntry(info)));
+ }
+
+ if (entry == NULL)
+ RETURN NULL;
+ else
+ RETURN entry->mi;
+}
+
+
+
+#ifdef DACCESS_COMPILE
+void
+DebuggerMethodInfoEntry::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ SUPPORTS_DAC;
+
+ // This structure is in an array in the hash
+ // so the 'this' is implicitly enumerated by the
+ // array enum in CHashTable.
+
+ // For a MiniDumpNormal, what is needed for modules is already enumerated elsewhere.
+    // Don't waste time doing it again here. Also, this would add many MB extra into the dump.
+ if ((key.pModule.IsValid()) &&
+ CLRDATA_ENUM_MEM_MINI != flags
+ && CLRDATA_ENUM_MEM_TRIAGE != flags)
+ {
+ key.pModule->EnumMemoryRegions(flags, true);
+ }
+
+ while (mi.IsValid())
+ {
+ mi->EnumMemoryRegions(flags);
+ mi = mi->m_prevMethodInfo;
+ }
+}
+
+void
+DebuggerMethodInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ DAC_ENUM_DTHIS();
+ SUPPORTS_DAC;
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ // Modules are enumerated already for minidumps, save the empty calls.
+ if (m_module.IsValid())
+ {
+ m_module->EnumMemoryRegions(flags, true);
+ }
+
+ }
+
+ PTR_DebuggerJitInfo jitInfo = m_latestJitInfo;
+ while (jitInfo.IsValid())
+ {
+ jitInfo->EnumMemoryRegions(flags);
+ jitInfo = jitInfo->m_prevJitInfo;
+ }
+}
+
+void
+DebuggerJitInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ DAC_ENUM_DTHIS();
+ SUPPORTS_DAC;
+
+ if (m_methodInfo.IsValid())
+ {
+ m_methodInfo->EnumMemoryRegions(flags);
+ }
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+ {
+ if (m_fd.IsValid())
+ {
+ m_fd->EnumMemoryRegions(flags);
+ }
+
+ DacEnumMemoryRegion(PTR_TO_TADDR(GetSequenceMap()),
+ GetSequenceMapCount() * sizeof(DebuggerILToNativeMap));
+ DacEnumMemoryRegion(PTR_TO_TADDR(GetVarNativeInfo()),
+ GetVarNativeInfoCount() *
+ sizeof(ICorDebugInfo::NativeVarInfo));
+ }
+}
+
+
+void DebuggerMethodInfoTable::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+{
+ WRAPPER_NO_CONTRACT;
+
+ DAC_ENUM_VTHIS();
+ CHashTableAndData<CNewZeroData>::EnumMemoryRegions(flags);
+
+ for (ULONG i = 0; i < m_iEntries; i++)
+ {
+ DebuggerMethodInfoEntry* entry =
+ PTR_DebuggerMethodInfoEntry(PTR_HOST_TO_TADDR(EntryPtr(i)));
+ entry->EnumMemoryRegions(flags);
+ }
+}
+#endif // #ifdef DACCESS_COMPILE
diff --git a/src/debug/ee/i386/.gitmirror b/src/debug/ee/i386/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/debug/ee/i386/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/debug/ee/i386/dbghelpers.asm b/src/debug/ee/i386/dbghelpers.asm
new file mode 100644
index 0000000000..f6e3a1611c
--- /dev/null
+++ b/src/debug/ee/i386/dbghelpers.asm
@@ -0,0 +1,101 @@
+;
+; Copyright (c) Microsoft. All rights reserved.
+; Licensed under the MIT license. See LICENSE file in the project root for full license information.
+;
+
+; ==++==
+;
+
+;
+; ==--==
+;
+; *** NOTE: If you make changes to this file, propagate the changes to
+; dbghelpers.s in this directory
+
+ .586
+ .model flat
+ .code
+
+ extern _FuncEvalHijackWorker@4:PROC
+
+; @dbgtodo- once we port Funceval, use the ExceptionHijack stub instead of this func-eval stub.
+;
+; This is the method that we hijack a thread running managed code. It calls
+; FuncEvalHijackWorker, which actually performs the func eval, then jumps to
+; the patch address so we can complete the cleanup.
+;
+; Note: the parameter is passed in eax - see Debugger::FuncEvalSetup for
+; details
+;
+_FuncEvalHijack@0 proc public
+ push eax ; the ptr to the DebuggerEval
+ call _FuncEvalHijackWorker@4
+        jmp eax                 ; return is the patch address to jmp to
+_FuncEvalHijack@0 endp
+
+
+
+;
+; Flares for interop debugging.
+; Flares are exceptions (breakpoints) at well known addresses which the RS
+; listens for when interop debugging.
+;
+
+; This exception is from managed code.
+_SignalHijackStartedFlare@0 proc public
+ int 3
+ ; make sure that the basic block is unique
+ test eax,1
+ ret
+_SignalHijackStartedFlare@0 endp
+
+; Start the handoff
+_ExceptionForRuntimeHandoffStartFlare@0 proc public
+ int 3
+ ; make sure that the basic block is unique
+ test eax,2
+ ret
+_ExceptionForRuntimeHandoffStartFlare@0 endp
+
+; Finish the handoff.
+_ExceptionForRuntimeHandoffCompleteFlare@0 proc public
+ int 3
+ ; make sure that the basic block is unique
+ test eax,3
+ ret
+_ExceptionForRuntimeHandoffCompleteFlare@0 endp
+
+; Return thread to pre-hijacked context.
+_SignalHijackCompleteFlare@0 proc public
+ int 3
+ ; make sure that the basic block is unique
+ test eax,4
+ ret
+_SignalHijackCompleteFlare@0 endp
+
+; This exception is from unmanaged code.
+_ExceptionNotForRuntimeFlare@0 proc public
+ int 3
+ ; make sure that the basic block is unique
+ test eax,5
+ ret
+_ExceptionNotForRuntimeFlare@0 endp
+
+; The Runtime is synchronized.
+_NotifyRightSideOfSyncCompleteFlare@0 proc public
+ int 3
+ ; make sure that the basic block is unique
+ test eax,6
+ ret
+_NotifyRightSideOfSyncCompleteFlare@0 endp
+
+
+
+; This goes at the end of the assembly file
+ end
+
+
+
+
+
+
diff --git a/src/debug/ee/i386/debuggerregdisplayhelper.cpp b/src/debug/ee/i386/debuggerregdisplayhelper.cpp
new file mode 100644
index 0000000000..2435b2cc9c
--- /dev/null
+++ b/src/debug/ee/i386/debuggerregdisplayhelper.cpp
@@ -0,0 +1,19 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+/* ------------------------------------------------------------------------- *
+ * DebuggerRegDisplayHelper.cpp -- implementation of the platform-dependent
+ * methods for transferring information between
+ * REGDISPLAY and DebuggerREGDISPLAY
+ * ------------------------------------------------------------------------- */
+
+#include "stdafx.h"
+
+
+void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc)
+{
+ *pDst = *pSrc;
+}
diff --git a/src/debug/ee/i386/primitives.cpp b/src/debug/ee/i386/primitives.cpp
new file mode 100644
index 0000000000..31f79fca71
--- /dev/null
+++ b/src/debug/ee/i386/primitives.cpp
@@ -0,0 +1,12 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+#include "stdafx.h"
+
+#include "../../shared/i386/primitives.cpp"
+
+
diff --git a/src/debug/ee/i386/x86walker.cpp b/src/debug/ee/i386/x86walker.cpp
new file mode 100644
index 0000000000..b28bb80d1e
--- /dev/null
+++ b/src/debug/ee/i386/x86walker.cpp
@@ -0,0 +1,501 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: x86walker.cpp
+//
+
+//
+// x86 instruction decoding/stepping logic
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+
+#include "walker.h"
+
+#include "frames.h"
+#include "openum.h"
+
+
+#ifdef _TARGET_X86_
+
+//
+// The x86 walker is currently pretty minimal. It only recognizes call and return opcodes, plus a few jumps. The rest
+// is treated as unknown.
+//
+void NativeWalker::Decode()
+{
+ const BYTE *ip = m_ip;
+
+ m_type = WALK_UNKNOWN;
+ m_skipIP = NULL;
+ m_nextIP = NULL;
+
+ LOG((LF_CORDB, LL_INFO100000, "NW:Decode: m_ip 0x%x\n", m_ip));
+ //
+ // Skip instruction prefixes
+ //
+ do
+ {
+ switch (*ip)
+ {
+ // Segment overrides
+ case 0x26: // ES
+ case 0x2E: // CS
+ case 0x36: // SS
+ case 0x3E: // DS
+ case 0x64: // FS
+ case 0x65: // GS
+
+ // Size overrides
+ case 0x66: // Operand-Size
+ case 0x67: // Address-Size
+
+ // Lock
+ case 0xf0:
+
+ // String REP prefixes
+ case 0xf1:
+ case 0xf2: // REPNE/REPNZ
+ case 0xf3:
+ LOG((LF_CORDB, LL_INFO10000, "NW:Decode: prefix:%0.2x ", *ip));
+ ip++;
+ continue;
+
+ default:
+ break;
+ }
+ } while (0);
+
+ // Read the opcode
+ m_opcode = *ip++;
+
+ LOG((LF_CORDB, LL_INFO100000, "NW:Decode: ip 0x%x, m_opcode:%0.2x\n", ip, m_opcode));
+
+ if (m_opcode == 0xcc)
+ {
+ m_opcode = DebuggerController::GetPatchedOpcode(m_ip);
+ LOG((LF_CORDB, LL_INFO100000, "NW:Decode after patch look up: m_opcode:%0.2x\n", m_opcode));
+ }
+
+ // Analyze what we can of the opcode
+ switch (m_opcode)
+ {
+ case 0xff:
+ {
+
+ BYTE modrm = *ip++;
+ BYTE mod = (modrm & 0xC0) >> 6;
+ BYTE reg = (modrm & 0x38) >> 3;
+ BYTE rm = (modrm & 0x07);
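+        // ModR/M layout: mod in bits 7-6 selects the addressing form, reg in bits 5-3 is
+        // the opcode extension (for opcode 0xFF: 2/3 = CALL, 4/5 = JMP), and rm in bits
+        // 2-0 names the base register (rm == 4 means an SIB byte follows).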
+
+ BYTE *result = 0;
+ WORD displace = 0;
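+        // displace tracks how many bytes the decoded call/jump occupies from the opcode
+        // onward (opcode + ModR/M + optional SIB + displacement); it is used below to
+        // compute the address of the instruction to skip to.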
+
+ if ((reg != 2) && (reg != 3) && (reg != 4) && (reg != 5)) {
+ //
+ // This is not a CALL or JMP instruction, return, unknown.
+ //
+ return;
+ }
+
+
+ if (m_registers != NULL)
+ {
+ // Only try to decode registers if we actually have reg sets.
+ switch (mod) {
+ case 0:
+ case 1:
+ case 2:
+
+ if (rm == 4) {
+
+ //
+ // Get values from the SIB byte
+ //
+ BYTE ss = (*ip & 0xC0) >> 6;
+ BYTE index = (*ip & 0x38) >> 3;
+ BYTE base = (*ip & 0x7);
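+                    // SIB layout: scale (shift count) in bits 7-6, index register in
+                    // bits 5-3 (0x4 means no index), base register in bits 2-0.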
+
+ ip++;
+
+ //
+ // Get starting value
+ //
+ if ((mod == 0) && (base == 5)) {
+ result = 0;
+ } else {
+ result = (BYTE *)(size_t)GetRegisterValue(base);
+ }
+
+ //
+ // Add in the [index]
+ //
+ if (index != 0x4) {
+ result = result + (GetRegisterValue(index) << ss);
+ }
+
+ //
+ // Finally add in the offset
+ //
+ if (mod == 0) {
+
+ if (base == 5) {
+ result = result + *((unsigned int *)ip);
+ displace = 7;
+ } else {
+ displace = 3;
+ }
+
+ } else if (mod == 1) {
+
+ result = result + *((char *)ip);
+ displace = 4;
+
+ } else { // == 2
+
+ result = result + *((unsigned int *)ip);
+ displace = 7;
+
+ }
+
+ } else {
+
+ //
+ // Get the value we need from the register.
+ //
+
+ if ((mod == 0) && (rm == 5)) {
+ result = 0;
+ } else {
+ result = (BYTE *)GetRegisterValue(rm);
+ }
+
+ if (mod == 0) {
+
+ if (rm == 5) {
+ result = result + *((unsigned int *)ip);
+ displace = 6;
+ } else {
+ displace = 2;
+ }
+
+ } else if (mod == 1) {
+
+ result = result + *((char *)ip);
+ displace = 3;
+
+ } else { // == 2
+
+ result = result + *((unsigned int *)ip);
+ displace = 6;
+
+ }
+
+ }
+
+ //
+ // Now dereference thru the result to get the resulting IP.
+ //
+
+ // If result is bad, then this means we can't predict what the nextIP will be.
+ // That's ok - we just leave m_nextIp=NULL. We can still provide callers
+ // with the proper walk-type.
+ // In practice, this shouldn't happen unless the jit emits bad opcodes.
+ if (result != NULL)
+ {
+ result = (BYTE *)(*((unsigned int *)result));
+ }
+
+ break;
+
+ case 3:
+ default:
+
+ result = (BYTE *)GetRegisterValue(rm);
+ displace = 2;
+ break;
+
+ }
+ } // have registers
+
+ if ((reg == 2) || (reg == 3)) {
+ m_type = WALK_CALL;
+ } else if ((reg == 4) || (reg == 5)) {
+ m_type = WALK_BRANCH;
+ } else {
+ break;
+ }
+
+ if (m_registers != NULL)
+ {
+ m_nextIP = result;
+ m_skipIP = m_ip + displace;
+ }
+
+ break;
+ } // end of 0xFF case
+
+ case 0xe8:
+ {
+ m_type = WALK_CALL;
+
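+        // CALL rel32: the target is the address of the next instruction plus the 32-bit
+        // displacement that follows the opcode; the fall-through ("skip") address is
+        // simply the next instruction.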
+ UINT32 disp = *((UINT32*)ip);
+ m_nextIP = ip + 4 + disp;
+ m_skipIP = ip + 4;
+
+ break;
+ }
+
+ case 0xe9:
+ {
+ m_type = WALK_BRANCH;
+
+ INT32 disp = *((INT32*)ip);
+ m_nextIP = ip + 4 + disp;
+ m_skipIP = ip + 4;
+
+ break;
+ }
+
+ case 0x9a:
+ m_type = WALK_CALL;
+
+ m_nextIP = (BYTE*) *((UINT32*)ip);
+ m_skipIP = ip + 4;
+
+ break;
+
+ case 0xc2:
+ case 0xc3:
+ case 0xca:
+ case 0xcb:
+ m_type = WALK_RETURN;
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+//
+// Given a regdisplay and a register number, return the value of the register.
+//
+
+DWORD NativeWalker::GetRegisterValue(int registerNumber)
+{
+ // If we're going to decode a register, then we'd better have a valid register set.
+ PREFIX_ASSUME(m_registers != NULL);
+
+ switch (registerNumber)
+ {
+ case 0:
+ return *m_registers->pEax;
+ break;
+ case 1:
+ return *m_registers->pEcx;
+ break;
+ case 2:
+ return *m_registers->pEdx;
+ break;
+ case 3:
+ return *m_registers->pEbx;
+ break;
+ case 4:
+ return m_registers->Esp;
+ break;
+ case 5:
+ return *m_registers->pEbp;
+ break;
+ case 6:
+ return *m_registers->pEsi;
+ break;
+ case 7:
+ return *m_registers->pEdi;
+ break;
+ default:
+ _ASSERTE(!"Invalid register number!");
+ }
+
+ return 0;
+}
+
+
+// static
+void NativeWalker::DecodeInstructionForPatchSkip(const BYTE *address, InstructionAttribute * pInstrAttrib)
+{
+ //
+ // Skip instruction prefixes
+ //
+
+ LOG((LF_CORDB, LL_INFO10000, "Patch decode: "));
+
+ if (pInstrAttrib == NULL)
+ return;
+
+ const BYTE * origAddr = address;
+
+ do
+ {
+ switch (*address)
+ {
+ // Segment overrides
+ case 0x26: // ES
+ case 0x2E: // CS
+ case 0x36: // SS
+ case 0x3E: // DS
+ case 0x64: // FS
+ case 0x65: // GS
+
+ // Size overrides
+ case 0x66: // Operand-Size
+ case 0x67: // Address-Size
+
+ // Lock
+ case 0xf0:
+
+ // String REP prefixes
+ case 0xf2: // REPNE/REPNZ
+ case 0xf3:
+ LOG((LF_CORDB, LL_INFO10000, "prefix:%0.2x ", *address));
+ address++;
+ continue;
+
+ default:
+ break;
+ }
+ } while (0);
+
+ // There can be at most 4 prefixes.
+ _ASSERTE(((address - origAddr) <= 4));
+
+ //
+ // Look at opcode to tell if it's a call or an
+ // absolute branch.
+ //
+
+ pInstrAttrib->Reset();
+
+ // Note that we only care about m_cbInstr, m_cbDisp, and m_dwOffsetToDisp for relative branches
+ // (either call or jump instructions).
+
+ switch (*address)
+ {
+ case 0xEA: // JMP far
+    case 0xC2: // RET imm16
+    case 0xC3: // RET
+ pInstrAttrib->m_fIsAbsBranch = true;
+ LOG((LF_CORDB, LL_INFO10000, "ABS:%0.2x\n", *address));
+ break;
+
+ case 0xE8: // CALL relative
+ pInstrAttrib->m_fIsCall = true;
+ pInstrAttrib->m_fIsRelBranch = true;
+ LOG((LF_CORDB, LL_INFO10000, "CALL REL:%0.2x\n", *address));
+
+ address += 1;
+ pInstrAttrib->m_cbDisp = 4;
+ break;
+
+ case 0xC8: // ENTER
+ pInstrAttrib->m_fIsCall = true;
+ pInstrAttrib->m_fIsAbsBranch = true;
+ LOG((LF_CORDB, LL_INFO10000, "CALL ABS:%0.2x\n", *address));
+ break;
+
+ case 0xFF: // CALL/JMP modr/m
+
+ //
+ // Read opcode modifier from modr/m
+ //
+
+ switch ((address[1]&0x38)>>3)
+ {
+ case 2:
+ case 3:
+ pInstrAttrib->m_fIsCall = true;
+ // fall through
+ case 4:
+ case 5:
+ pInstrAttrib->m_fIsAbsBranch = true;
+ }
+ LOG((LF_CORDB, LL_INFO10000, "CALL/JMP modr/m:%0.2x\n", *address));
+ break;
+
+ case 0x9A: // CALL ptr16:32
+ pInstrAttrib->m_fIsCall = true;
+ pInstrAttrib->m_fIsAbsBranch = true;
+ break;
+
+ case 0xEB: // JMP rel8
+ pInstrAttrib->m_fIsRelBranch = true;
+
+ address += 1;
+ pInstrAttrib->m_cbDisp = 1;
+ break;
+
+ case 0xE9: // JMP rel32
+ pInstrAttrib->m_fIsRelBranch = true;
+
+ address += 1;
+ pInstrAttrib->m_cbDisp = 4;
+ break;
+
+ case 0x0F: // Jcc (conditional jump)
+        // If the second opcode byte is between 0x80 and 0x8F, then it's a conditional jump.
+ // Conditional jumps are always relative.
+ if ((address[1] & 0xF0) == 0x80)
+ {
+ pInstrAttrib->m_fIsCond = true;
+ pInstrAttrib->m_fIsRelBranch = true;
+
+ address += 2; // 2-byte opcode
+ pInstrAttrib->m_cbDisp = 4;
+ }
+ break;
+
+ case 0x70:
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78:
+ case 0x79:
+ case 0x7A:
+ case 0x7B:
+ case 0x7C:
+ case 0x7D:
+ case 0x7E:
+ case 0x7F: // Jcc (conditional jump)
+ case 0xE3: // JCXZ/JECXZ (jump on CX/ECX zero)
+ pInstrAttrib->m_fIsCond = true;
+ pInstrAttrib->m_fIsRelBranch = true;
+
+ address += 1;
+ pInstrAttrib->m_cbDisp = 1;
+ break;
+
+ default:
+ LOG((LF_CORDB, LL_INFO10000, "NORMAL:%0.2x\n", *address));
+ }
+
+ // Get additional information for relative branches.
+ if (pInstrAttrib->m_fIsRelBranch)
+ {
+ _ASSERTE(pInstrAttrib->m_cbDisp != 0);
+ pInstrAttrib->m_dwOffsetToDisp = (address - origAddr);
+
+ // Relative jump and call instructions don't use the SIB byte, and there is no immediate value.
+ // So the instruction size is just the offset to the displacement plus the size of the displacement.
+ pInstrAttrib->m_cbInstr = pInstrAttrib->m_dwOffsetToDisp + pInstrAttrib->m_cbDisp;
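+        // For example, an unprefixed E8 rel32 (CALL) has a 1-byte offset to the
+        // displacement and a 4-byte displacement, so m_cbInstr comes out to 5; any
+        // prefixes decoded above are already folded into m_dwOffsetToDisp.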
+ }
+}
+
+
+#endif
diff --git a/src/debug/ee/inprocdac.cpp b/src/debug/ee/inprocdac.cpp
new file mode 100644
index 0000000000..e451cf08bb
--- /dev/null
+++ b/src/debug/ee/inprocdac.cpp
@@ -0,0 +1,432 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: InProcDac.cpp
+//
+
+//
+//
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+
+#include "inprocdac.h"
+#include "dacdbiinterface.h"
+#include "cordebug.h"
+#include "metadata.h"
+
+InProcDac::InProcDac() :
+ m_pDacDbi(NULL),
+ m_pUnpacker(NULL)
+{
+}
+
+InProcDac::~InProcDac()
+{
+ Cleanup();
+}
+
+//
+// Debugger::InitializeDAC
+//
+// DAC is used in-process on the Mac and ARM devices.
+// This is similar to CordbProcess::CreateDacDbiInterface on Windows.
+// @dbgtodo : try and share some of this code with the RS equivalent?
+//
+void InProcDac::Initialize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ // don't double-init
+ _ASSERTE(m_pDataTarget == NULL);
+ _ASSERTE(m_pDacDbi == NULL);
+ _ASSERTE(m_pUnpacker == NULL);
+
+ HRESULT hrStatus = S_OK;
+ HModuleHolder hDacDll;
+
+ //
+    // Load the access DLL from the same directory as the current CLR DLL.
+ //
+ WCHAR wszRuntimePath[MAX_PATH]; // base directory of the runtime (including trailing /)
+ WCHAR wszAccessDllPath[MAX_PATH]; // full path to the DAC Dll
+
+ if (!WszGetModuleFileName(GetCLRModule(), wszRuntimePath, NumItems(wszRuntimePath)))
+ {
+ ThrowLastError();
+ }
+
+ const char pathSep = '\\';
+
+ // remove CLR filename
+ PWSTR pPathTail = wcsrchr(wszRuntimePath, pathSep);
+ if (!pPathTail)
+ {
+ ThrowHR(E_INVALIDARG);
+ }
+ pPathTail[1] = '\0';
+
+ // In the case where this function is called multiple times, save the module handle to the DAC shared
+ // library so that we won't try to free and load it multiple times.
+ if (m_hDacModule == NULL)
+ {
+ if (wcscpy_s(wszAccessDllPath, _countof(wszAccessDllPath), wszRuntimePath) ||
+ wcscat_s(wszAccessDllPath, _countof(wszAccessDllPath), MAKEDLLNAME_W(MAIN_DAC_MODULE_NAME_W)))
+ {
+ ThrowHR(E_INVALIDARG);
+ }
+
+ hDacDll.Assign(WszLoadLibrary(wszAccessDllPath));
+ if (!hDacDll)
+ {
+ CONSISTENCY_CHECK_MSGF(false,("Unable to find DAC dll: %s", wszAccessDllPath));
+
+ DWORD dwLastError = GetLastError();
+ if (dwLastError == ERROR_MOD_NOT_FOUND)
+ {
+ // Give a more specific error in the case where we can't find the DAC dll.
+ ThrowHR(CORDBG_E_DEBUG_COMPONENT_MISSING);
+ }
+ else
+ {
+ ThrowWin32(dwLastError);
+ }
+ }
+
+ // Succeeded. Now copy out.
+ m_hDacModule.Assign(hDacDll);
+ hDacDll.SuppressRelease();
+ }
+
+ // Create the data target
+ ReleaseHolder<InProcDataTarget> pDataTarget = new InProcDataTarget();
+
+ //
+ // Get the access interface, passing our callback interfaces (data target, and metadata lookup)
+ //
+
+ IDacDbiInterface::IMetaDataLookup * pMetaDataLookup = this;
+ IDacDbiInterface::IAllocator * pAllocator = this;
+
+ // Get the CLR instance ID - the base address of the CLR module
+ CORDB_ADDRESS clrInstanceId = reinterpret_cast<CORDB_ADDRESS>(GetCLRModule());
+
+ typedef HRESULT (STDAPICALLTYPE * PFN_DacDbiInterfaceInstance)(
+ ICorDebugDataTarget *,
+ CORDB_ADDRESS,
+ IDacDbiInterface::IAllocator *,
+ IDacDbiInterface::IMetaDataLookup *,
+ IDacDbiInterface **);
+
+ IDacDbiInterface* pInterfacePtr = NULL;
+ PFN_DacDbiInterfaceInstance pfnEntry = (PFN_DacDbiInterfaceInstance)
+ GetProcAddress(m_hDacModule, "DacDbiInterfaceInstance");
+
+ if (!pfnEntry)
+ {
+ ThrowLastError();
+ }
+
+ hrStatus = pfnEntry(pDataTarget, clrInstanceId,
+ pAllocator, pMetaDataLookup, &pInterfacePtr);
+ IfFailThrow(hrStatus);
+
+ // We now have a resource, pInterfacePtr, that needs to be freed.
+
+ m_pDacDbi = pInterfacePtr;
+ m_pDataTarget = pDataTarget.Extract();
+
+ // Enable DAC target consistency checking - we're in-proc and so better always be consistent
+ m_pDacDbi->DacSetTargetConsistencyChecks( true );
+ m_pUnpacker = new DDUnpack(pInterfacePtr, pAllocator); // throws
+}
+
+void InProcDac::Cleanup()
+{
+ CONTRACTL
+ {
+ NOTHROW; // backout code.
+ }
+ CONTRACTL_END;
+
+ if (m_pDacDbi != NULL)
+ {
+ m_pDacDbi->Destroy();
+ m_pDacDbi = NULL;
+ }
+
+ if(m_pUnpacker != NULL)
+ {
+ delete m_pUnpacker;
+ m_pUnpacker = NULL;
+ }
+
+ if (m_pDataTarget != NULL)
+ {
+ m_pDataTarget.Clear();
+ }
+
+ // Note that once we release this handle, the DAC module can be unloaded and all calls
+ // into DAC could be invalid.
+ if (m_hDacModule != NULL)
+ {
+ m_hDacModule.Clear();
+ }
+}
+
+HRESULT InProcDac::DoRequest(ReadBuffer * pSend, WriteBuffer * pResult)
+{
+ HRESULT hr = S_OK;
+
+ // Lazily initialize the DacDbiMarshalStub.
+ if (m_pDacDbi == NULL)
+ {
+ EX_TRY
+ {
+ Initialize();
+ }
+ EX_CATCH_HRESULT(hr);
+ IfFailRet(hr);
+ }
+
+ _ASSERTE(m_pDacDbi != NULL);
+
+ /*
+ * @dbgtodo : We have to make sure to call Flush whenever runtime data structures may have changed.
+ * Eg:
+ * - after every IPC event
+ * - whenever we suspend the process
+ * For now we rely on the RS to tell us when to flush, just like the Windows runtime. It's a little riskier
+ * in this case because the target is actually running code. Since the cost of copying locally is fairly
+ * low, it is probably best to just flush at the beginning and/or end of all DD requests (i.e. here).
+     * Flushing more than necessary may be best for performance.
+     * Note however that this could in theory expose latent bugs where we've been getting away with bleeding
+ * DAC state across DD calls on Windows.
+ */
+ EX_TRY
+ {
+ m_pUnpacker->HandleDDMessage(pSend, pResult);
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+}
+
+#ifndef DACCESS_COMPILE
+IMDInternalImport * InProcDac::LookupMetaData(VMPTR_PEFile addressPEFile, bool &isILMetaDataForNGENImage)
+{
+ isILMetaDataForNGENImage = false;
+ PEFile* peFile = addressPEFile.GetRawPtr();
+ return peFile->GetPersistentMDImport();
+}
+#endif
+//***************************************************************
+// InProcDataTarget implementation
+//***************************************************************
+
+//
+// InProcDataTarget ctor
+//
+// Instantiate an InProcDataTarget
+//
+InProcDac::InProcDataTarget::InProcDataTarget() :
+ m_ref(0)
+{
+}
+
+//
+// InProcDataTarget dtor
+//
+//
+InProcDac::InProcDataTarget::~InProcDataTarget()
+{
+}
+
+// Standard impl of IUnknown::QueryInterface
+HRESULT STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::QueryInterface(
+ REFIID InterfaceId,
+ PVOID* pInterface)
+{
+ if (InterfaceId == IID_IUnknown)
+ {
+ *pInterface = static_cast<IUnknown *>(static_cast<ICorDebugDataTarget *>(this));
+ }
+ else if (InterfaceId == IID_ICorDebugDataTarget)
+ {
+ *pInterface = static_cast<ICorDebugDataTarget *>(this);
+ }
+ else if (InterfaceId == IID_ICorDebugMutableDataTarget)
+ {
+ *pInterface = static_cast<ICorDebugMutableDataTarget *>(this);
+ }
+ else
+ {
+ *pInterface = NULL;
+ return E_NOINTERFACE;
+ }
+
+ AddRef();
+ return S_OK;
+}
+
+// Standard impl of IUnknown::AddRef
+ULONG STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::AddRef()
+{
+ LONG ref = InterlockedIncrement(&m_ref);
+ return ref;
+}
+
+// Standard impl of IUnknown::Release
+ULONG STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::Release()
+{
+ LONG ref = InterlockedDecrement(&m_ref);
+ if (ref == 0)
+ {
+ delete this;
+ }
+ return ref;
+}
+
+// impl of interface method ICorDebugDataTarget::GetPlatform
+HRESULT STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::GetPlatform(
+ CorDebugPlatform * pPlatform)
+{
+#if defined(_TARGET_X86_)
+ *pPlatform = CORDB_PLATFORM_WINDOWS_X86;
+#elif defined(_TARGET_AMD64_)
+ *pPlatform = CORDB_PLATFORM_WINDOWS_AMD64;
+#elif defined(_TARGET_ARM_)
+ *pPlatform = CORDB_PLATFORM_WINDOWS_ARM;
+#else
+#error Unknown Processor.
+#endif // platform
+
+ return S_OK;
+}
+
+// impl of interface method ICorDebugDataTarget::ReadVirtual
+HRESULT STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::ReadVirtual(
+ CORDB_ADDRESS address,
+ PBYTE pBuffer,
+ ULONG32 cbRequestSize,
+ ULONG32 * pcbRead)
+{
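+    // We are reading our own address space, so a plain memcpy is sufficient; an
+    // out-of-process data target would have to use something like ReadProcessMemory here.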
+ void * pSrc = reinterpret_cast<void*>(address);
+ memcpy(pBuffer, pSrc, cbRequestSize);
+ if (pcbRead != NULL)
+ {
+ *pcbRead = cbRequestSize;
+ }
+ return S_OK;
+}
+
+// impl of interface method ICorDebugMutableDataTarget::WriteVirtual
+HRESULT STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::WriteVirtual(
+ CORDB_ADDRESS address,
+ const BYTE * pBuffer,
+ ULONG32 cbRequestSize)
+{
+ void * pDst = reinterpret_cast<void*>(address);
+ memcpy(pDst, pBuffer, cbRequestSize);
+ return S_OK;
+}
+
+
+// impl of interface method ICorDebugDataTarget::GetThreadContext
+HRESULT STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::GetThreadContext(
+ DWORD dwThreadID,
+ ULONG32 contextFlags,
+ ULONG32 contextSize,
+ PBYTE pContext)
+{
+ if (contextSize < sizeof(CONTEXT))
+ {
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ HandleHolder hThread = ::OpenThread(THREAD_GET_CONTEXT, FALSE, dwThreadID);
+ if (hThread == NULL)
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ // This assumes pContext is appropriately aligned.
+ CONTEXT * pCtx = reinterpret_cast<CONTEXT*>(pContext);
+ pCtx->ContextFlags = contextFlags;
+ if (!::GetThreadContext(hThread, pCtx))
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ return S_OK;
+}
+
+// impl of interface method ICorDebugMutableDataTarget::SetThreadContext
+HRESULT STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::SetThreadContext(
+ DWORD dwThreadID,
+ ULONG32 contextSize,
+ const BYTE * pContext)
+{
+ if (contextSize < sizeof(CONTEXT))
+ {
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ HandleHolder hThread = ::OpenThread(THREAD_SET_CONTEXT, FALSE, dwThreadID);
+ if (hThread == NULL)
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ // This assumes pContext is appropriately aligned.
+ const CONTEXT * pCtx = reinterpret_cast<const CONTEXT*>(pContext);
+ if (!::SetThreadContext(hThread,pCtx))
+ {
+ return HRESULT_FROM_GetLastError();
+ }
+
+ return S_OK;
+}
+
+// implementation of ICorDebugMutableDataTarget::ContinueStatusChanged
+HRESULT STDMETHODCALLTYPE
+InProcDac::InProcDataTarget::ContinueStatusChanged(
+ DWORD dwThreadId,
+ CORDB_CONTINUE_STATUS continueStatus)
+{
+ return E_NOTIMPL;
+}
+
+#ifndef DACCESS_COMPILE
+
+// Trivial implementation for IDacDbiInterface::IAllocator methods
+void * InProcDac::Alloc(SIZE_T lenBytes)
+{
+ return new BYTE[lenBytes];
+}
+
+void InProcDac::Free(void * p)
+{
+ BYTE* pB = static_cast<BYTE*>(p);
+ delete[] pB;
+}
+
+#endif //!DACCESS_COMPILE
+
+#endif //FEATURE_DBGIPC_TRANSPORT_VM
diff --git a/src/debug/ee/inprocdac.h b/src/debug/ee/inprocdac.h
new file mode 100644
index 0000000000..408159d12a
--- /dev/null
+++ b/src/debug/ee/inprocdac.h
@@ -0,0 +1,157 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: InProcDac.h
+//
+
+//
+//*****************************************************************************
+
+#ifndef _INPROCDAC_H
+#define _INPROCDAC_H
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+#include "dacdbiinterface.h"
+#include "cordebug.h"
+#include "xcordebug.h"
+
+#ifndef DACCESS_COMPILE
+#include "ddunpack.h"
+#endif
+
+class IDacDbiMarshalStub;
+class ReadBuffer;
+class WriteBuffer;
+
+//
+// InProcDac is a helper class used by the Debugger class to make DAC and
+// the IDacDbiInterface available from within the process.
+// This is done on the Macintosh because we don't have OS support for our
+// normal out-of-process access (e.g. VM reads as a non-root user).
+//
+// Note that we don't ever actually use this in DACCESS_COMPILE builds - its
+// implementation is compiled into just mscorwks, but the callbacks (data target
+// and IMetaDataLookup) are called from mscordacwks. We need the declaration
+// visible in DACCESS_COMPILE builds because a field of this type is contained
+// by-value in the Debugger class, and so we need the correct size for field
+// layout.
+//
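+// Illustrative usage sketch (hypothetical caller code; the field name m_inProcDac
+// and the buffer setup below are assumptions, not taken from this header):
+//
+//     ReadBuffer  send;    // marshalled DD request received over the transport
+//     WriteBuffer result;  // marshalled reply to hand back to the right side
+//     HRESULT hr = m_inProcDac.DoRequest(&send, &result);
+//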
+class InProcDac
+ : private IDacDbiInterface::IMetaDataLookup,
+ private IDacDbiInterface::IAllocator
+{
+public:
+ InProcDac() DAC_EMPTY();
+ ~InProcDac() DAC_EMPTY();
+
+ void Initialize();
+ void Cleanup();
+
+ // This takes a marshalled version of a DD interface request
+ HRESULT DoRequest(ReadBuffer * pSend, WriteBuffer * pResult);
+
+private:
+
+ // IMetaDataLookup methods
+ virtual IMDInternalImport * LookupMetaData(VMPTR_PEFile addressPEFile, bool &isILMetaDataForNGENImage);
+
+ //
+ // IAllocator interfaces
+ //
+ virtual void * Alloc(SIZE_T lenBytes) DAC_EMPTY_RET(NULL);
+
+ virtual void Free(void * p) DAC_EMPTY();
+
+ class InProcDataTarget :
+ public ICorDebugMutableDataTarget
+ {
+ public:
+ InProcDataTarget();
+ virtual ~InProcDataTarget();
+
+ // IUnknown.
+ virtual HRESULT STDMETHODCALLTYPE QueryInterface(
+ REFIID riid,
+ void** ppInterface);
+
+ virtual ULONG STDMETHODCALLTYPE AddRef();
+
+ virtual ULONG STDMETHODCALLTYPE Release();
+
+ // ICorDebugMutableDataTarget.
+ virtual HRESULT STDMETHODCALLTYPE GetPlatform(
+ CorDebugPlatform *pPlatform);
+
+ virtual HRESULT STDMETHODCALLTYPE ReadVirtual(
+ CORDB_ADDRESS address,
+ PBYTE pBuffer,
+ ULONG32 request,
+ ULONG32 *pcbRead);
+
+ virtual HRESULT STDMETHODCALLTYPE WriteVirtual(
+ CORDB_ADDRESS address,
+ const BYTE * pBuffer,
+ ULONG32 request);
+
+ virtual HRESULT STDMETHODCALLTYPE GetThreadContext(
+ DWORD dwThreadID,
+ ULONG32 contextFlags,
+ ULONG32 contextSize,
+ PBYTE context);
+
+ virtual HRESULT STDMETHODCALLTYPE SetThreadContext(
+ DWORD dwThreadID,
+ ULONG32 contextSize,
+ const BYTE * context);
+
+ virtual HRESULT STDMETHODCALLTYPE ContinueStatusChanged(
+ DWORD dwThreadId,
+ CORDB_CONTINUE_STATUS continueStatus);
+
+ private:
+ LONG m_ref; // Reference count.
+ };
+
+
+
+private:
+ //
+ // InProcDac Fields
+ //
+ ReleaseHolder<InProcDataTarget> m_pDataTarget;
+ HModuleHolder m_hDacModule;
+#ifndef DACCESS_COMPILE
+ IDacDbiInterface * m_pDacDbi;
+ DDUnpack * m_pUnpacker;
+#else
+ VOID * m_pDacDbi;
+ VOID * m_pUnpacker;
+#endif
+};
+
+
+#ifdef DACCESS_COMPILE
+// This method is a funny case for DAC and DacCop. InProcDac isn't used in DACCESS_COMPILE builds at all
+// (inprocdac.cpp isn't compiled in DAC builds), but we need the declaration since an instance
+// of it is contained by-value in the Debugger class (need to know the right size so field layout
+// matches the target). The LookupMetadata function is called from DAC, and so DacCop searches
+// for all implementations of it in mscordacwks.dll and finds this one (the real one is either in
+// mscordbi.dll or coreclr which DacCop doesn't analyze). We need an implementation of virtual
+// methods for the DACCESS_COMPILE build, but rather than use the usual DAC_EMPTY macros we'll
+// use this explicit implementation here to avoid a DacCop violation.
+inline IMDInternalImport * InProcDac::LookupMetaData(VMPTR_PEFile addressPEFile, bool &isILMetaDataForNGENImage)
+{
+ SUPPORTS_DAC; // not really - but we should never be called
+ _ASSERTE_MSG(false, "This implementation should never be called in DAC builds");
+ DacError(E_UNEXPECTED);
+ return NULL;
+}
+#endif // DACCESS_COMPILE
+
+
+
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+#endif //_INPROCDAC_H
diff --git a/src/debug/ee/rcthread.cpp b/src/debug/ee/rcthread.cpp
new file mode 100644
index 0000000000..7e6f1ae304
--- /dev/null
+++ b/src/debug/ee/rcthread.cpp
@@ -0,0 +1,2209 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: RCThread.cpp
+//
+
+//
+// Runtime Controller Thread
+//
+//*****************************************************************************
+
+#include "stdafx.h"
+
+
+#include "securitywrapper.h"
+#include <aclapi.h>
+#include <hosting.h>
+
+#include "ipcmanagerinterface.h"
+#include "eemessagebox.h"
+#include "genericstackprobe.h"
+
+#ifndef SM_REMOTESESSION
+#define SM_REMOTESESSION 0x1000
+#endif
+
+#include <limits.h>
+
+#ifdef _DEBUG
+// Declare statics
+EEThreadId DebuggerRCThread::s_DbgHelperThreadId;
+#endif
+
+//
+// Constructor
+//
+DebuggerRCThread::DebuggerRCThread(Debugger * pDebugger)
+ : m_debugger(pDebugger),
+ m_pDCB(NULL),
+ m_thread(NULL),
+ m_run(true),
+ m_threadControlEvent(NULL),
+ m_helperThreadCanGoEvent(NULL),
+ m_fDetachRightSide(false)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ WRAPPER(THROWS);
+ GC_NOTRIGGER;
+ CONSTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pDebugger != NULL);
+
+ for( int i = 0; i < IPC_TARGET_COUNT;i++)
+ {
+ m_rgfInitRuntimeOffsets[i] = true;
+ }
+
+ // Initialize this here because we Destroy it in the DTOR.
+ // Note that this function can't fail.
+}
+
+
+//
+// Destructor. Cleans up all of the open handles the RC thread uses.
+// This expects that the RC thread has been stopped and has terminated
+// before being called.
+//
+DebuggerRCThread::~DebuggerRCThread()
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ DESTRUCTOR_CHECK;
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO1000, "DebuggerRCThread::~DebuggerRCThread\n"));
+
+ // We explicitly leak the debugger object on shutdown. See Debugger::StopDebugger for details.
+ _ASSERTE(!"RCThread dtor should not be called.");
+}
+
+
+
+//---------------------------------------------------------------------------------------
+//
+// Close the IPC events associated with a debugger connection
+//
+// Notes:
+// The only IPC connection supported is OOP.
+//
+//---------------------------------------------------------------------------------------
+void DebuggerRCThread::CloseIPCHandles()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ if( m_pDCB != NULL)
+ {
+ m_pDCB->m_rightSideProcessHandle.Close();
+ }
+}
+
+//-----------------------------------------------------------------------------
+// Helper to get the proper decorated name
+// Caller ensures that the buffer is large enough. We'll assert just to check,
+// but no runtime failure.
+// pBuf - the output buffer to write the decorated name in
+// cBufSizeInChars - the size of the buffer in characters, including the null.
+// pPrefix - The undecorated name of the event.
+//-----------------------------------------------------------------------------
+void GetPidDecoratedName(__out_z __in_ecount(cBufSizeInChars) WCHAR * pBuf,
+ int cBufSizeInChars,
+ const WCHAR * pPrefix)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ DWORD pid = GetCurrentProcessId();
+
+ GetPidDecoratedName(pBuf, cBufSizeInChars, pPrefix, pid);
+}
+
+
+
+
+//-----------------------------------------------------------------------------
+// Simple wrapper to create win32 events.
+// This helps make DebuggerRCThread::Init pretty, because we
+// create lots of events there.
+// These will either:
+// 1) Create/Open and return an event
+// 2) or throw an exception.
+// @todo - should these be CLREvents? ClrCreateManualEvent / ClrCreateAutoEvent
+//-----------------------------------------------------------------------------
+HANDLE CreateWin32EventOrThrow(
+ LPSECURITY_ATTRIBUTES lpEventAttributes,
+ EEventResetType eType,
+ BOOL bInitialState
+)
+{
+ CONTRACT(HANDLE)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(lpEventAttributes, NULL_OK));
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ HANDLE h = NULL;
+ h = WszCreateEvent(lpEventAttributes, (BOOL) eType, bInitialState, NULL);
+
+ if (h == NULL)
+ ThrowLastError();
+
+ RETURN h;
+}
+
+//-----------------------------------------------------------------------------
+// Open an event. Another helper for DebuggerRCThread::Init
+//-----------------------------------------------------------------------------
+HANDLE OpenWin32EventOrThrow(
+ DWORD dwDesiredAccess,
+ BOOL bInheritHandle,
+ LPCWSTR lpName
+)
+{
+ CONTRACT(HANDLE)
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ POSTCONDITION(RETVAL != NULL);
+ }
+ CONTRACT_END;
+
+ HANDLE h = WszOpenEvent(
+ dwDesiredAccess,
+ bInheritHandle,
+ lpName
+ );
+ if (h == NULL)
+ ThrowLastError();
+
+ RETURN h;
+}
+
+//-----------------------------------------------------------------------------
+// Holder for IPC SecurityAttribute
+//-----------------------------------------------------------------------------
+IPCHostSecurityAttributeHolder::IPCHostSecurityAttributeHolder(DWORD pid)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ m_pSA = NULL;
+
+#ifdef FEATURE_IPCMAN
+ HRESULT hr = CCLRSecurityAttributeManager::GetHostSecurityAttributes(&m_pSA);
+ IfFailThrow(hr);
+
+ _ASSERTE(m_pSA != NULL);
+#endif // FEATURE_IPCMAN
+}
+
+SECURITY_ATTRIBUTES * IPCHostSecurityAttributeHolder::GetHostSA()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pSA;
+}
+
+
+IPCHostSecurityAttributeHolder::~IPCHostSecurityAttributeHolder()
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifdef FEATURE_IPCMAN
+ CCLRSecurityAttributeManager::DestroyHostSecurityAttributes(m_pSA);
+#endif // FEATURE_IPCMAN
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Init
+//
+// Initialize the IPC block.
+//
+// Arguments:
+// hRsea - Handle to Right-Side Event Available event.
+// hRser - Handle to Right-Side Event Read event.
+// hLsea - Handle to Left-Side Event Available event.
+// hLser - Handle to Left-Side Event Read event.
+// hLsuwe - Handle to Left-Side unmanaged wait event.
+//
+// Notes:
+// The Init method works since there are no virtual functions - don't add any virtual functions without
+// changing this!
+// We assume ownership of the handles as soon as we're called, regardless of our success.
+// On failure, we throw.
+// Initialization of the debugger control block occurs partly on the left side and partly on
+// the right side. This initialization occurs in parallel, so it's unsafe to make assumptions about
+// the order in which the fields will be initialized.
+//
+//
+//---------------------------------------------------------------------------------------
+HRESULT DebuggerIPCControlBlock::Init(
+ HANDLE hRsea,
+ HANDLE hRser,
+ HANDLE hLsea,
+ HANDLE hLser,
+ HANDLE hLsuwe
+)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ // NOTE this works since there are no virtual functions - don't add any without changing this!
+ // Although we assume the IPC block is zero-initialized by the OS upon creation, we still need to clear
+ // the memory here to protect ourselves from a DoS attack. One scenario is when a malicious debugger
+ // pre-creates a bogus IPC block. This means that our synchronization scheme won't work in DoS
+ // attack scenarios, but we will be messed up anyway.
+ // WARNING!!! m_DCBSize is used as a semaphore and is set to non-zero to signal that initialization of the
+ // WARNING!!! DCB is complete. if you remove the below memset be sure to initialize m_DCBSize to zero in the ctor!
+ memset( this, 0, sizeof( DebuggerIPCControlBlock) );
+
+ // Setup version checking info.
+ m_verMajor = VER_PRODUCTBUILD;
+ m_verMinor = VER_PRODUCTBUILD_QFE;
+
+#ifdef _DEBUG
+ m_checkedBuild = true;
+#else
+ m_checkedBuild = false;
+#endif
+ m_bHostingInFiber = false;
+
+ // Are we in fiber mode? In Whidbey, we do not support launching a fiber mode process
+ // nor do we support attach to a fiber mode process.
+ //
+ if (g_CORDebuggerControlFlags & DBCF_FIBERMODE)
+ {
+ m_bHostingInFiber = true;
+ }
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif
+ // Copy RSEA and RSER into the control block.
+ if (!m_rightSideEventAvailable.SetLocal(hRsea))
+ {
+ ThrowLastError();
+ }
+
+ if (!m_rightSideEventRead.SetLocal(hRser))
+ {
+ ThrowLastError();
+ }
+
+ if (!m_leftSideUnmanagedWaitEvent.SetLocal(hLsuwe))
+ {
+ ThrowLastError();
+ }
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+
+ // Mark the debugger special thread list as not dirty, empty and null.
+ m_specialThreadListDirty = false;
+ m_specialThreadListLength = 0;
+ m_specialThreadList = NULL;
+
+ m_shutdownBegun = false;
+
+ return S_OK;
+}
+
+#ifdef FEATURE_IPCMAN
+extern CCLRSecurityAttributeManager s_CLRSecurityAttributeManager;
+#endif // FEATURE_IPCMAN
+
+
+void DebuggerRCThread::WatchForStragglers(void)
+{
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(m_threadControlEvent != NULL);
+ LOG((LF_CORDB,LL_INFO100000, "DRCT::WFS:setting event to watch "
+ "for stragglers\n"));
+
+ SetEvent(m_threadControlEvent);
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Init sets up all the objects that the RC thread will need to run.
+//
+//
+// Return Value:
+// S_OK on success. May also throw.
+//
+// Assumptions:
+// Called during startup, even if we're not debugging.
+//
+//
+//---------------------------------------------------------------------------------------
+HRESULT DebuggerRCThread::Init(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(!ThisIsHelperThreadWorker()); // initialized by main thread
+ }
+ CONTRACTL_END;
+
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DebuggerRCThreadInit called\n"));
+
+ DWORD dwStatus;
+ if (m_debugger == NULL)
+ {
+ ThrowHR(E_INVALIDARG);
+ }
+
+ // Init should only be called once.
+ if (g_pRCThread != NULL)
+ {
+ ThrowHR(E_FAIL);
+ }
+
+ g_pRCThread = this;
+
+ m_favorData.Init(); // throws
+
+
+ // Create the thread control event.
+ m_threadControlEvent = CreateWin32EventOrThrow(NULL, kAutoResetEvent, FALSE);
+
+ // Create the helper thread can go event.
+ m_helperThreadCanGoEvent = CreateWin32EventOrThrow(NULL, kManualResetEvent, TRUE);
+
+ m_pDCB = new(nothrow) DebuggerIPCControlBlock;
+
+ // Don't fail out because the shared memory failed to create
+#if _DEBUG
+ if (m_pDCB == NULL)
+ {
+ LOG((LF_CORDB, LL_INFO10000,
+ "DRCT::I: Failed to get Debug IPC block.\n"));
+ }
+#endif // _DEBUG
+
+ HRESULT hr;
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif
+ IPCHostSecurityAttributeHolder sa(GetCurrentProcessId());
+
+ // Create the events that the thread will need to receive events
+ // from the out of process piece on the right side.
+ // We will not fail out if CreateEvent fails for RSEA or RSER, because
+ // the worst case is that the debugger cannot attach to the debuggee.
+ //
+ HandleHolder rightSideEventAvailable(WszCreateEvent(sa.GetHostSA(), (BOOL) kAutoResetEvent, FALSE, NULL));
+
+ // Security fix:
+ // We need to check the last error to see if the event was precreated or not
+ // If so, we need to release the handle right now.
+ //
+ dwStatus = GetLastError();
+ if (dwStatus == ERROR_ALREADY_EXISTS)
+ {
+ // clean up the handle now
+ rightSideEventAvailable.Clear();
+ }
+
+ HandleHolder rightSideEventRead(WszCreateEvent(sa.GetHostSA(), (BOOL) kAutoResetEvent, FALSE, NULL));
+
+ // Security fix:
+ // We need to check the last error to see if the event was precreated or not
+ // If so, we need to release the handle right now.
+ //
+ dwStatus = GetLastError();
+ if (dwStatus == ERROR_ALREADY_EXISTS)
+ {
+ // clean up the handle now
+ rightSideEventRead.Clear();
+ }
+
+
+ HandleHolder leftSideUnmanagedWaitEvent(CreateWin32EventOrThrow(NULL, kManualResetEvent, FALSE));
+
+ // Copy RSEA and RSER into the control block only if shared memory is created without error.
+ if (m_pDCB)
+ {
+ // Since Init() gets ownership of handles as soon as it's called, we can
+ // release our ownership now.
+ rightSideEventAvailable.SuppressRelease();
+ rightSideEventRead.SuppressRelease();
+ leftSideUnmanagedWaitEvent.SuppressRelease();
+
+ // NOTE: initialization of the debugger control block occurs partly on the left side and partly on
+ // the right side. This initialization occurs in parallel, so it's unsafe to make assumptions about
+ // the order in which the fields will be initialized.
+ hr = m_pDCB->Init(rightSideEventAvailable,
+ rightSideEventRead,
+ NULL,
+ NULL,
+ leftSideUnmanagedWaitEvent);
+
+ _ASSERTE(SUCCEEDED(hr)); // throws on error.
+ }
+
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+ else
+ {
+ if (m_pDCB)
+ {
+ hr = m_pDCB->Init(NULL, NULL, NULL, NULL, NULL);
+ _ASSERTE(SUCCEEDED(hr)); // throws on error.
+ }
+ }
+#endif
+
+ if(m_pDCB)
+ {
+ // We have to ensure that most of the runtime offsets for the out-of-proc DCB are initialized right away. This is
+ // needed to support certain races during an interop attach. Since we can't know whether an interop attach will ever
+ // happen or not, we are forced to do this now. Note: this is really too early, as some data structures haven't been
+ // initialized yet!
+ hr = EnsureRuntimeOffsetsInit(IPC_TARGET_OUTOFPROC);
+ _ASSERTE(SUCCEEDED(hr)); // throw on error
+
+ // Note: we have to mark that we need the runtime offsets re-initialized for the out-of-proc DCB. This is because
+ // things like the patch table aren't initialized yet. Calling NeedRuntimeOffsetsReInit() ensures that this happens
+ // before we really need the patch table.
+ NeedRuntimeOffsetsReInit(IPC_TARGET_OUTOFPROC);
+
+ m_pDCB->m_helperThreadStartAddr = (void *) DebuggerRCThread::ThreadProcStatic;
+ m_pDCB->m_helperRemoteStartAddr = (void *) DebuggerRCThread::ThreadProcRemote;
+ m_pDCB->m_leftSideProtocolCurrent = CorDB_LeftSideProtocolCurrent;
+ m_pDCB->m_leftSideProtocolMinSupported = CorDB_LeftSideProtocolMinSupported;
+
+ LOG((LF_CORDB, LL_INFO10,
+ "DRCT::I: version info: %d.%d.%d current protocol=%d, min protocol=%d\n",
+ m_pDCB->m_verMajor,
+ m_pDCB->m_verMinor,
+ m_pDCB->m_checkedBuild,
+ m_pDCB->m_leftSideProtocolCurrent,
+ m_pDCB->m_leftSideProtocolMinSupported));
+
+ // Left-side always creates helper-thread.
+ // @dbgtodo inspection - by end of V3, LS will never create helper-thread :)
+ m_pDCB->m_rightSideShouldCreateHelperThread = false;
+
+ // m_DCBSize is used as a semaphore to indicate that the DCB is fully initialized.
+ // let's ensure that it's updated after all the other fields.
+ MemoryBarrier();
+ m_pDCB->m_DCBSize = sizeof(DebuggerIPCControlBlock);
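+ // Illustrative sketch (reader side, hypothetical code): a consumer of the DCB
+ // should treat m_DCBSize as the "initialized" flag and check it before trusting
+ // any other field, roughly:
+ //
+ //     if (pDCB->m_DCBSize == 0) { /* DCB not fully initialized yet; retry */ }
+ //     else                      { /* safe to read the remaining DCB fields */ }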
+ }
+
+ return S_OK;
+}
+
+#ifndef FEATURE_PAL
+
+// This function is used to verify the security descriptor on an event
+// matches our expectation to prevent attack. This should be called when
+// we opened an event by name and assumed that the RS created the event.
+// That means the event's DACL should match our default policy - current user
+// and admin. It can be narrower. By default, the DACL looks like the debugger
+// process user, debuggee user, and admin.
+//
+HRESULT DebuggerRCThread::VerifySecurityOnRSCreatedEvents(
+ HANDLE sse,
+ HANDLE lsea,
+ HANDLE lser)
+{
+
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG0(LF_CORDB,LL_INFO1000,"DRCT::VerifySecurityOnRSCreatedEvents\n");
+
+ if (lsea == NULL || lser == NULL)
+ {
+ // No valid handle, so there is nothing to verify.
+ // The caller will close the handles.
+ return E_FAIL;
+ }
+
+ HRESULT hr = S_OK;
+
+ SIZE_T i;
+ ACCESS_ALLOWED_ACE *pAllowAceSSE = NULL;
+ ACCESS_ALLOWED_ACE *pAllowAceLSEA = NULL;
+ ACCESS_ALLOWED_ACE *pAllowAceLSER = NULL;
+
+
+ EX_TRY
+ {
+ // Get security descriptors for the handles.
+ Win32SecurityDescriptor sdSSE;
+ sdSSE.InitFromHandle(sse);
+
+ Win32SecurityDescriptor sdLSEA;
+ sdLSEA.InitFromHandle(lsea);
+
+ Win32SecurityDescriptor sdLSER;
+ sdLSER.InitFromHandle(lser);
+
+
+
+
+ // Make sure all 3 have the same creator
+ // We've already verified in CreateSetupSyncEvent that the SSE's owner is in the DACL.
+ if (!Sid::Equals(sdSSE.GetOwner(), sdLSEA.GetOwner()) ||
+ !Sid::Equals(sdSSE.GetOwner(), sdLSER.GetOwner()))
+ {
+ // Not equal! return now with failure code.
+ STRESS_LOG1(LF_CORDB,LL_INFO1000,"DRCT::VSORSCE failed on EqualSid - 0x%08x\n", hr);
+ ThrowHR(E_FAIL);
+ }
+
+ // DACL_SECURITY_INFORMATION
+ // Now verify the DACL. It should contain at most two ACEs. One of them is the
+ // target process SID.
+ Dacl daclSSE = sdSSE.GetDacl();
+ Dacl daclLSEA = sdLSEA.GetDacl();
+ Dacl daclLSER = sdLSER.GetDacl();
+
+
+ // Now all three of these ACLs should be alike. There should be at most two entries
+ // in each: one is the debugger process's SID and one is the debuggee's SID.
+ if ((daclSSE.GetAceCount() != 1) && (daclSSE.GetAceCount() != 2))
+ {
+ ThrowHR(E_FAIL);
+ }
+
+
+ // The ACE counts should be equal for all events.
+ if ((daclSSE.GetAceCount() != daclLSEA.GetAceCount()) ||
+ (daclSSE.GetAceCount() != daclLSER.GetAceCount()))
+ {
+ ThrowHR(E_FAIL);
+ }
+
+ // Now check the ACEs inside. These should all be equal.
+ for (i = 0; i < daclSSE.GetAceCount(); i++)
+ {
+ ACE_HEADER *pAce;
+
+ // Get the ace from the SSE
+ pAce = daclSSE.GetAce(i);
+ if (pAce->AceType != ACCESS_ALLOWED_ACE_TYPE)
+ {
+ ThrowHR(E_FAIL);
+ }
+ pAllowAceSSE = (ACCESS_ALLOWED_ACE*)pAce;
+
+ // Get the ace from LSEA
+ pAce = daclLSEA.GetAce(i);
+ if (pAce->AceType != ACCESS_ALLOWED_ACE_TYPE)
+ {
+ ThrowHR(E_FAIL);
+ }
+ pAllowAceLSEA = (ACCESS_ALLOWED_ACE*)pAce;
+
+ // This is the SID
+ // We can call EqualSid on this pAllowAce->SidStart
+ if (EqualSid((PSID)&(pAllowAceSSE->SidStart), (PSID)&(pAllowAceLSEA->SidStart)) == FALSE)
+ {
+ // ACE not equal. Fail out.
+ ThrowHR(E_FAIL);
+ }
+
+ // Get the ace from LSER
+ pAce = daclLSER.GetAce(i);
+ if (pAce->AceType != ACCESS_ALLOWED_ACE_TYPE)
+ {
+ ThrowHR(E_FAIL);
+ }
+ pAllowAceLSER = (ACCESS_ALLOWED_ACE*)pAce;
+
+ if (EqualSid((PSID)&(pAllowAceSSE->SidStart), (PSID)&(pAllowAceLSER->SidStart)) == FALSE)
+ {
+ // ACE not equal. Fail out.
+ ThrowHR(E_FAIL);
+ }
+ } // end for loop.
+
+
+ // The last ACE should be the target process. That is, it should be
+ // our process's SID!
+ //
+ if (pAllowAceLSER == NULL)
+ {
+ ThrowHR(E_FAIL); // fail if we don't have the ACE.
+ }
+ {
+ SidBuffer sbCurrentProcess;
+ sbCurrentProcess.InitFromProcess(GetCurrentProcessId());
+ if (!Sid::Equals(sbCurrentProcess.GetSid(), (PSID)&(pAllowAceLSER->SidStart)))
+ {
+ ThrowHR(E_FAIL);
+ }
+ }
+ }
+ EX_CATCH
+ {
+ // If we threw an exception, then the verification failed.
+ hr = E_FAIL;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ if (FAILED(hr))
+ {
+ STRESS_LOG1(LF_CORDB,LL_INFO1000,"DRCT::VSORSCE failed with - 0x%08x\n", hr);
+ }
+
+ return hr;
+}
+
+#endif // FEATURE_PAL
+
+//---------------------------------------------------------------------------------------
+//
+// Setup the Runtime Offsets struct.
+//
+// Arguments:
+// pDebuggerIPCControlBlock - Pointer to the debugger's portion of the IPC
+// block, which this routine will write into the offsets of various parts of
+// the runtime.
+//
+// Return Value:
+// S_OK on success.
+//
+//---------------------------------------------------------------------------------------
+HRESULT DebuggerRCThread::SetupRuntimeOffsets(DebuggerIPCControlBlock * pDebuggerIPCControlBlock)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ // Allocate the struct if needed. We just fill in any existing one.
+ DebuggerIPCRuntimeOffsets * pDebuggerRuntimeOffsets = pDebuggerIPCControlBlock->m_pRuntimeOffsets;
+
+ if (pDebuggerRuntimeOffsets == NULL)
+ {
+ // Perhaps we should preallocate this. This is the only allocation
+ // that would force SendIPCEvent to throw an exception. It'd be very
+ // nice to have it preallocated.
+ CONTRACT_VIOLATION(ThrowsViolation);
+ pDebuggerRuntimeOffsets = new DebuggerIPCRuntimeOffsets();
+ _ASSERTE(pDebuggerRuntimeOffsets != NULL); // throws on oom
+ }
+
+ // Fill out the struct.
+#ifdef FEATURE_INTEROP_DEBUGGING
+ pDebuggerRuntimeOffsets->m_genericHijackFuncAddr = Debugger::GenericHijackFunc;
+ // Set flares - these only exist for interop debugging.
+ pDebuggerRuntimeOffsets->m_signalHijackStartedBPAddr = (void*) SignalHijackStartedFlare;
+ pDebuggerRuntimeOffsets->m_excepForRuntimeHandoffStartBPAddr = (void*) ExceptionForRuntimeHandoffStartFlare;
+ pDebuggerRuntimeOffsets->m_excepForRuntimeHandoffCompleteBPAddr = (void*) ExceptionForRuntimeHandoffCompleteFlare;
+ pDebuggerRuntimeOffsets->m_signalHijackCompleteBPAddr = (void*) SignalHijackCompleteFlare;
+ pDebuggerRuntimeOffsets->m_excepNotForRuntimeBPAddr = (void*) ExceptionNotForRuntimeFlare;
+ pDebuggerRuntimeOffsets->m_notifyRSOfSyncCompleteBPAddr = (void*) NotifyRightSideOfSyncCompleteFlare;
+
+#if !defined(FEATURE_CORESYSTEM)
+ // Grab the address of RaiseException in kernel32 because we have to play some games with exceptions
+ // that are generated there (just another reason why mixed mode debugging is shady). See bug 476768.
+ HMODULE hModule = WszGetModuleHandle(W("kernel32.dll"));
+ _ASSERTE(hModule != NULL);
+ PREFAST_ASSUME(hModule != NULL);
+ pDebuggerRuntimeOffsets->m_raiseExceptionAddr = GetProcAddress(hModule, "RaiseException");
+ _ASSERTE(pDebuggerRuntimeOffsets->m_raiseExceptionAddr != NULL);
+ hModule = NULL;
+#else
+ pDebuggerRuntimeOffsets->m_raiseExceptionAddr = NULL;
+#endif
+#endif // FEATURE_INTEROP_DEBUGGING
+
+ pDebuggerRuntimeOffsets->m_pPatches = DebuggerController::GetPatchTable();
+ pDebuggerRuntimeOffsets->m_pPatchTableValid = (BOOL*)DebuggerController::GetPatchTableValidAddr();
+ pDebuggerRuntimeOffsets->m_offRgData = DebuggerPatchTable::GetOffsetOfEntries();
+ pDebuggerRuntimeOffsets->m_offCData = DebuggerPatchTable::GetOffsetOfCount();
+ pDebuggerRuntimeOffsets->m_cbPatch = sizeof(DebuggerControllerPatch);
+ pDebuggerRuntimeOffsets->m_offAddr = offsetof(DebuggerControllerPatch, address);
+ pDebuggerRuntimeOffsets->m_offOpcode = offsetof(DebuggerControllerPatch, opcode);
+ pDebuggerRuntimeOffsets->m_cbOpcode = sizeof(PRD_TYPE);
+ pDebuggerRuntimeOffsets->m_offTraceType = offsetof(DebuggerControllerPatch, trace.type);
+ pDebuggerRuntimeOffsets->m_traceTypeUnmanaged = TRACE_UNMANAGED;
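+ // Illustrative sketch (right-side pseudo-usage, hypothetical code): with these
+ // offsets the out-of-process debugger can walk the patch table without knowing
+ // the DebuggerControllerPatch layout at compile time, roughly:
+ //
+ //     BYTE *   pEntry = pRgData + i * ro.m_cbPatch;
+ //     void *   addr   = *(void **)  (pEntry + ro.m_offAddr);
+ //     PRD_TYPE opcode = *(PRD_TYPE *)(pEntry + ro.m_offOpcode);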
+
+ // @dbgtodo inspection - this should all go away or be obtained from DacDbi Primitives.
+ g_pEEInterface->GetRuntimeOffsets(&pDebuggerRuntimeOffsets->m_TLSIndex,
+ &pDebuggerRuntimeOffsets->m_TLSIsSpecialIndex,
+ &pDebuggerRuntimeOffsets->m_TLSCantStopIndex,
+ &pDebuggerRuntimeOffsets->m_TLSIndexOfPredefs,
+ &pDebuggerRuntimeOffsets->m_EEThreadStateOffset,
+ &pDebuggerRuntimeOffsets->m_EEThreadStateNCOffset,
+ &pDebuggerRuntimeOffsets->m_EEThreadPGCDisabledOffset,
+ &pDebuggerRuntimeOffsets->m_EEThreadPGCDisabledValue,
+ &pDebuggerRuntimeOffsets->m_EEThreadDebuggerWordOffset,
+ &pDebuggerRuntimeOffsets->m_EEThreadFrameOffset,
+ &pDebuggerRuntimeOffsets->m_EEThreadMaxNeededSize,
+ &pDebuggerRuntimeOffsets->m_EEThreadSteppingStateMask,
+ &pDebuggerRuntimeOffsets->m_EEMaxFrameValue,
+ &pDebuggerRuntimeOffsets->m_EEThreadDebuggerFilterContextOffset,
+ &pDebuggerRuntimeOffsets->m_EEThreadCantStopOffset,
+ &pDebuggerRuntimeOffsets->m_EEFrameNextOffset,
+ &pDebuggerRuntimeOffsets->m_EEIsManagedExceptionStateMask);
+
+#ifndef FEATURE_IMPLICIT_TLS
+ _ASSERTE((pDebuggerRuntimeOffsets->m_TLSIndexOfPredefs != 0) || !"CExecutionEngine::TlsIndex is not initialized yet");
+#endif
+
+ // Remember the struct in the control block.
+ pDebuggerIPCControlBlock->m_pRuntimeOffsets = pDebuggerRuntimeOffsets;
+
+ return S_OK;
+}
+
+struct DebugFilterParam
+{
+ DebuggerIPCEvent *event;
+};
+
+// Filter called when we throw an exception while Handling events.
+static LONG _debugFilter(LPEXCEPTION_POINTERS ep, PVOID pv)
+{
+ LOG((LF_CORDB, LL_INFO10,
+ "Unhandled exception in Debugger::HandleIPCEvent\n"));
+
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+#if defined(_DEBUG) || !defined(FEATURE_CORESYSTEM)
+ DebuggerIPCEvent *event = ((DebugFilterParam *)pv)->event;
+
+ DWORD pid = GetCurrentProcessId();
+ DWORD tid = GetCurrentThreadId();
+
+ DebuggerIPCEventType type = (DebuggerIPCEventType) (event->type & DB_IPCE_TYPE_MASK);
+#endif // defined(_DEBUG) || !defined(FEATURE_CORESYSTEM)
+
+ // We should never AV here. In a debug build, throw up an assert w/ lots of useful (private) info.
+#ifdef _DEBUG
+ {
+ // We can't really use SStrings on the helper thread; though if we're at this point, we've already died.
+ // So go ahead and risk it and use them anyways.
+ SString sStack;
+ StackScratchBuffer buffer;
+ GetStackTraceAtContext(sStack, ep->ContextRecord);
+ const CHAR *string = NULL;
+
+ EX_TRY
+ {
+ string = sStack.GetANSI(buffer);
+ }
+ EX_CATCH
+ {
+ string = "*Could not retrieve stack*";
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+
+ CONSISTENCY_CHECK_MSGF(false,
+ ("Unhandled exception on the helper thread.\nEvent=%s(0x%x)\nCode=0x%0x, Ip=0x%p, .cxr=%p, .exr=%p.\n pid=0x%x (%d), tid=0x%x (%d).\n-----\nStack of exception:\n%s\n----\n",
+ IPCENames::GetName(type), type,
+ ep->ExceptionRecord->ExceptionCode, GetIP(ep->ContextRecord), ep->ContextRecord, ep->ExceptionRecord,
+ pid, pid, tid, tid,
+ string));
+ }
+#endif
+
+// this message box doesn't work well on coresystem... we actually get in a recursive exception handling loop
+#ifndef FEATURE_CORESYSTEM
+ // We took an AV on the helper thread. This is a catastrophic situation, so we
+ // simply call the EE's catastrophic message box to display the error.
+ EEMessageBoxCatastrophic(
+ IDS_DEBUG_UNHANDLEDEXCEPTION_IPC, IDS_DEBUG_SERVICE_CAPTION,
+ type,
+ ep->ExceptionRecord->ExceptionCode,
+ GetIP(ep->ContextRecord),
+ pid, pid, tid, tid);
+#endif
+
+ // For debugging, we can change the behavior by manually setting eax.
+ // EXCEPTION_EXECUTE_HANDLER=1, EXCEPTION_CONTINUE_SEARCH=0, EXCEPTION_CONTINUE_EXECUTION=-1
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+#ifdef _DEBUG
+// Tracking to ensure that we don't call New() for the normal (non interop-safe heap)
+// on the helper thread. We also can't do a normal allocation when we have hard
+// suspended any other thread (since it could hold the OS heap lock).
+
+// TODO: this probably belongs in the EE itself, not here in the debugger stuff.
+
+void AssertAllocationAllowed()
+{
+#ifdef USE_INTEROPSAFE_HEAP
+ // Don't forget to preserve error status!
+ DWORD err = GetLastError();
+
+ // We can mark certain scopes as exempt from this check (see g_DbgSuppressAllocationAsserts).
+ if (g_DbgSuppressAllocationAsserts == 0)
+ {
+
+ // If we have hard suspended any threads, we want to assert, as allocating could cause a deadlock
+ // since those suspended threads may hold the OS heap lock.
+ if (g_fEEStarted) {
+ _ASSERTE (!EEAllocationDisallowed());
+ }
+
+ // Can't call IsDbgHelperSpecialThread() here b/c that changes program state.
+ // So we use our cached helper thread id (s_DbgHelperThreadId) instead.
+ if (DebuggerRCThread::s_DbgHelperThreadId.IsSameThread())
+ {
+ // In case assert allocates, bump up the 'OK' counter to avoid an infinite recursion.
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+
+ _ASSERTE(false || !"New called on Helper Thread");
+
+ }
+ }
+ SetLastError(err);
+#endif
+}
+#endif
+
+
+//---------------------------------------------------------------------------------------
+//
+// Primary function of the Runtime Controller thread. First, we let
+// the Debugger Interface know that we're up and running. Then, we run
+// the main loop.
+//
+//---------------------------------------------------------------------------------------
+void DebuggerRCThread::ThreadProc(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_TRIGGERS; // Debugger::SuspendComplete can trigger GC
+
+ // Although we're the helper thread, we haven't set it yet.
+ DISABLED(PRECONDITION(ThisIsHelperThreadWorker()));
+
+ INSTANCE_CHECK;
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG_RESERVE_MEM (0);
+ // This message actually serves a purpose (which is why it is always run)
+ // The Stress log is run during hijacking, when other threads can be suspended
+ // at arbitrary locations (including when holding a lock that NT uses to serialize
+ // all memory allocations). By sending a message now, we ensure that the stress
+ // log will not allocate memory at these critical times and thus avoid deadlock.
+ {
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ STRESS_LOG0(LF_CORDB|LF_ALWAYS, LL_ALWAYS, "Debugger Thread spinning up\n");
+
+ // Call this to force creation of the TLS slots on helper-thread.
+ IsDbgHelperSpecialThread();
+ }
+
+#ifdef _DEBUG
+ // Track the helper thread.
+ s_DbgHelperThreadId.SetThreadId();
+#endif
+ CantAllocHolder caHolder;
+
+
+#ifdef _DEBUG
+ // Cause wait in the helper thread startup. This lets us test against certain races.
+ // 1 = 6 sec. (shorter than Poll)
+ // 2 = 12 sec (longer than Poll).
+ // 3 = infinite - never comes up.
+ static int fDelayHelper = -1;
+
+ if (fDelayHelper == -1)
+ {
+ fDelayHelper = UnsafeGetConfigDWORD(CLRConfig::INTERNAL_DbgDelayHelper);
+ }
+
+ if (fDelayHelper)
+ {
+ DWORD dwSleep = 6000;
+
+ switch(fDelayHelper)
+ {
+ case 1: dwSleep = 6000; break;
+ case 2: dwSleep = 12000; break;
+ case 3: dwSleep = INFINITE; break;
+ }
+
+ ClrSleepEx(dwSleep, FALSE);
+ }
+#endif
+
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::TP: helper thread spinning up...\n"));
+
+ // In case the shared memory was not initialized properly, this becomes a no-op.
+ if (m_pDCB == NULL)
+ {
+ return;
+ }
+
+ // Lock the debugger before spinning up.
+ Debugger::DebuggerLockHolder debugLockHolder(m_debugger);
+
+ if (m_pDCB->m_helperThreadId != 0)
+ {
+ // someone else has created a helper thread, we're outta here
+ // the most likely scenario here is that there was some kind of
+ // race between remote thread creation and local thread creation
+
+ LOG((LF_CORDB, LL_EVERYTHING, "Second debug helper thread creation detected, thread will safely suicide\n"));
+ // dbgLockHolder goes out of scope - implicit Release
+ return;
+ }
+
+ // this thread took the lock and there is no existing m_helperThreadID therefore
+ // this *IS* the helper thread and nobody else can be the helper thread
+
+ // the handle was created by the Start method
+ _ASSERTE(m_thread != NULL);
+
+#ifdef _DEBUG
+ // Make sure that we have the proper permissions.
+ {
+ DWORD dwWaitResult = WaitForSingleObject(m_thread, 0);
+ _ASSERTE(dwWaitResult == WAIT_TIMEOUT);
+ }
+#endif
+
+ // Mark that we're the true helper thread. Now that we've marked
+ // this, no other threads will ever become the temporary helper
+ // thread.
+ m_pDCB->m_helperThreadId = GetCurrentThreadId();
+
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::TP: helper thread id is 0x%x helperThreadId\n",
+ m_pDCB->m_helperThreadId));
+
+ // If there is a temporary helper thread, then we need to wait for
+ // it to finish being the helper thread before we can become the
+ // helper thread.
+ if (m_pDCB->m_temporaryHelperThreadId != 0)
+ {
+ LOG((LF_CORDB, LL_INFO1000,
+ "DRCT::TP: temporary helper thread 0x%x is in the way, "
+ "waiting...\n",
+ m_pDCB->m_temporaryHelperThreadId));
+
+ debugLockHolder.Release();
+
+ // Wait for the temporary helper thread to finish up.
+ DWORD dwWaitResult = WaitForSingleObject(m_helperThreadCanGoEvent, INFINITE);
+ (void)dwWaitResult; //prevent "unused variable" error from GCC
+
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::TP: done waiting for temp help to finish up.\n"));
+
+ _ASSERTE(dwWaitResult == WAIT_OBJECT_0);
+ _ASSERTE(m_pDCB->m_temporaryHelperThreadId==0);
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::TP: no temp help in the way...\n"));
+
+ debugLockHolder.Release();
+ }
+
+ // Run the main loop as the true helper thread.
+ MainLoop();
+}
+
+void DebuggerRCThread::RightSideDetach(void)
+{
+ _ASSERTE( m_fDetachRightSide == false );
+ m_fDetachRightSide = true;
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif
+ CloseIPCHandles();
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+}
+
+//
+// These defines control how many times we spin while waiting for threads to sync and how often. Note it's higher in
+// debug builds to allow extra time for threads to sync.
+//
+#define CorDB_SYNC_WAIT_TIMEOUT 20 // 20ms
+
+#ifdef _DEBUG
+#define CorDB_MAX_SYNC_SPIN_COUNT (10000 / CorDB_SYNC_WAIT_TIMEOUT) // (10 seconds)
+#else
+#define CorDB_MAX_SYNC_SPIN_COUNT (3000 / CorDB_SYNC_WAIT_TIMEOUT) // (3 seconds)
+#endif
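+//
+// For reference, with the 20ms timeout above this works out to 10000/20 = 500
+// spins (10 seconds) in debug builds and 3000/20 = 150 spins (3 seconds) in
+// release builds.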
+
+//
+// NDPWhidbey issue 10749 - Due to a compiler change for vc7.1,
+// Don't inline this function!
+// PAL_TRY allocates space on the stack and so cannot be used within a loop,
+// else we'll slowly leak stack space w/ each iteration and get an overflow.
+// So make this its own function to enforce that we free the stack space between
+// iterations.
+//
+bool HandleIPCEventWrapper(Debugger* pDebugger, DebuggerIPCEvent *e)
+{
+ struct Param : DebugFilterParam
+ {
+ Debugger* pDebugger;
+ bool wasContinue;
+ } param;
+ param.event = e;
+ param.pDebugger = pDebugger;
+ param.wasContinue = false;
+ PAL_TRY(Param *, pParam, &param)
+ {
+ pParam->wasContinue = pParam->pDebugger->HandleIPCEvent(pParam->event);
+ }
+ PAL_EXCEPT_FILTER(_debugFilter)
+ {
+ LOG((LF_CORDB, LL_INFO10, "Unhandled exception caught in Debugger::HandleIPCEvent\n"));
+ }
+ PAL_ENDTRY
+
+ return param.wasContinue;
+}
+
+bool DebuggerRCThread::HandleRSEA()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ if (g_pEEInterface->GetThread() != NULL) { GC_TRIGGERS; } else { GC_NOTRIGGER; }
+ PRECONDITION(ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB,LL_INFO10000, "RSEA from out of process (right side)\n"));
+ DebuggerIPCEvent * e;
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif
+ // Make room for any Right Side event on the stack.
+ BYTE buffer[CorDBIPC_BUFFER_SIZE];
+ e = (DebuggerIPCEvent *) buffer;
+
+ // If the RSEA is signaled, then handle the event from the Right Side.
+ memcpy(e, GetIPCEventReceiveBuffer(), CorDBIPC_BUFFER_SIZE);
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+ else
+ {
+ // Be sure to fetch the event into the official receive buffer since some event handlers assume it's there
+ // regardless of the event buffer pointer passed to them.
+ e = GetIPCEventReceiveBuffer();
+ g_pDbgTransport->GetNextEvent(e, CorDBIPC_BUFFER_SIZE);
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ if(!useTransport)
+ {
+#endif
+ // If no reply is required, then let the Right Side go since we've got a copy of the event now.
+ _ASSERTE(!e->asyncSend || !e->replyRequired);
+
+ if (!e->replyRequired && !e->asyncSend)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML: no reply required, letting Right Side go.\n"));
+
+ BOOL succ = SetEvent(m_pDCB->m_rightSideEventRead);
+
+ if (!succ)
+ CORDBDebuggerSetUnrecoverableWin32Error(m_debugger, 0, true);
+ }
+#ifdef LOGGING
+ else if (e->asyncSend)
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML: async send.\n"));
+ else
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML: reply required, holding Right Side...\n"));
+#endif
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ // Pass the event to the debugger for handling. Returns true if the event was a Continue event and we can
+ // stop looking for stragglers. We wrap this whole thing in an exception handler to help us debug faults.
+ bool wasContinue = false;
+
+ wasContinue = HandleIPCEventWrapper(m_debugger, e);
+
+ return wasContinue;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Main loop of the Runtime Controller thread. It waits for IPC events
+// and dishes them out to the Debugger object for processing.
+//
+// Some of this logic is copied in Debugger::VrpcToVls
+//
+//---------------------------------------------------------------------------------------
+void DebuggerRCThread::MainLoop()
+{
+ // This function can only be called on native Debugger helper thread.
+ //
+
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+
+ PRECONDITION(m_thread != NULL);
+ PRECONDITION(ThisIsHelperThreadWorker());
+ PRECONDITION(IsDbgHelperSpecialThread()); // Can only be called on native debugger helper thread
+ PRECONDITION((!ThreadStore::HoldingThreadStore()) || g_fProcessDetach);
+ }
+ CONTRACTL_END;
+
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML:: running main loop\n"));
+
+ // Anybody doing helper duty is in a can't-stop range, period.
+ // Our helper thread is already in a can't-stop range, so this is particularly useful for
+ // threads doing helper duty.
+ CantStopHolder cantStopHolder;
+
+ HANDLE rghWaitSet[DRCT_COUNT_FINAL];
+
+#ifdef _DEBUG
+ DWORD dwSyncSpinCount = 0;
+#endif
+
+ // We start out just listening on RSEA and the thread control event...
+ unsigned int cWaitCount = DRCT_COUNT_INITIAL;
+ DWORD dwWaitTimeout = INFINITE;
+ rghWaitSet[DRCT_CONTROL_EVENT] = m_threadControlEvent;
+ rghWaitSet[DRCT_FAVORAVAIL] = GetFavorAvailableEvent();
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+ rghWaitSet[DRCT_RSEA] = m_pDCB->m_rightSideEventAvailable;
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+ else
+ {
+ rghWaitSet[DRCT_RSEA] = g_pDbgTransport->GetIPCEventReadyEvent();
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ CONTRACT_VIOLATION(ThrowsViolation);// HndCreateHandle throws, and this loop is not backstopped by any EH
+
+ // Lock holder. Don't take it yet. We take this lock once we have successfully suspended the runtime.
+ // We will release the lock later when Continue happens and the runtime resumes.
+ Debugger::DebuggerLockHolder debugLockHolderSuspended(m_debugger, false);
+
+ while (m_run)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML: waiting for event.\n"));
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+ // If there is a debugger attached, wait on its handle, too...
+ if ((cWaitCount == DRCT_COUNT_INITIAL) &&
+ m_pDCB->m_rightSideProcessHandle.ImportToLocalProcess() != NULL)
+ {
+ _ASSERTE((cWaitCount + 1) == DRCT_COUNT_FINAL);
+ rghWaitSet[DRCT_DEBUGGER_EVENT] = m_pDCB->m_rightSideProcessHandle;
+ cWaitCount = DRCT_COUNT_FINAL;
+ }
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ if (m_fDetachRightSide)
+ {
+ m_fDetachRightSide = false;
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ if(!useTransport)
+ {
+#endif
+ _ASSERTE(cWaitCount == DRCT_COUNT_FINAL);
+ _ASSERTE((cWaitCount - 1) == DRCT_COUNT_INITIAL);
+
+ rghWaitSet[DRCT_DEBUGGER_EVENT] = NULL;
+ cWaitCount = DRCT_COUNT_INITIAL;
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+ }
+
+ // Wait for an event from the Right Side.
+ DWORD dwWaitResult = WaitForMultipleObjectsEx(cWaitCount, rghWaitSet, FALSE, dwWaitTimeout, FALSE);
+
+ if (!m_run)
+ {
+ continue;
+ }
+
+
+ if (dwWaitResult == WAIT_OBJECT_0 + DRCT_DEBUGGER_EVENT)
+ {
+ // If the handle of the right side process is signaled, then we've lost our controlling debugger. We
+ // terminate this process immediately in such a case.
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML: terminating this process. Right Side has exited.\n"));
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ EEPOLICY_HANDLE_FATAL_ERROR(0);
+ _ASSERTE(!"Should never reach this point.");
+ }
+ else if (dwWaitResult == WAIT_OBJECT_0 + DRCT_FAVORAVAIL)
+ {
+ // execute the callback set by DoFavor()
+ FAVORCALLBACK fpCallback = GetFavorFnPtr();
+ // We never expect the callback to be null unless some other component
+ // wrongly signals our event (see DD 463807).
+ // In case we messed up, we will not set the FavorReadEvent and will hang the favor-requesting thread.
+ if (fpCallback)
+ {
+ (*fpCallback)(GetFavorData());
+ SetEvent(GetFavorReadEvent());
+ }
+ }
+ else if (dwWaitResult == WAIT_OBJECT_0 + DRCT_RSEA)
+ {
+ bool fWasContinue = HandleRSEA();
+
+ if (fWasContinue)
+ {
+
+ // If they called continue, then we must have released the TSL.
+ _ASSERTE(!ThreadStore::HoldingThreadStore() || g_fProcessDetach);
+
+ // Let's release the lock here since runtime is resumed.
+ debugLockHolderSuspended.Release();
+
+ // This debugger thread should not be holding debugger locks anymore
+ _ASSERTE(!g_pDebugger->ThreadHoldsLock());
+#ifdef _DEBUG
+ // Always reset the syncSpinCount to 0 on a continue so that we have the maximum number of possible
+ // spins the next time we need to sync.
+ dwSyncSpinCount = 0;
+#endif
+
+ if (dwWaitTimeout != INFINITE)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML:: don't check for stragglers due to continue.\n"));
+
+ dwWaitTimeout = INFINITE;
+ }
+
+ }
+ }
+ else if (dwWaitResult == WAIT_OBJECT_0 + DRCT_CONTROL_EVENT)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML:: straggler event set.\n"));
+
+ Debugger::DebuggerLockHolder debugLockHolder(m_debugger);
+ // Make sure that we're still synchronizing...
+ if (m_debugger->IsSynchronizing())
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML:: dropping the timeout.\n"));
+
+ dwWaitTimeout = CorDB_SYNC_WAIT_TIMEOUT;
+
+ //
+ // Skip waiting the first time and just give it a go. Note: Implicit
+ // release of the lock, because we are leaving its scope.
+ //
+ goto LWaitTimedOut;
+ }
+#ifdef LOGGING
+ else
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML:: told to wait, but not syncing anymore.\n"));
+#endif
+ // dbgLockHolder goes out of scope - implicit Release
+ }
+ else if (dwWaitResult == WAIT_TIMEOUT)
+ {
+
+LWaitTimedOut:
+
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML:: wait timed out.\n"));
+
+ // Debugger::DebuggerLockHolder debugLockHolder(m_debugger);
+ // Explicitly get the lock here since we need to check whether we
+ // have suspended. We will release the lock if we are not suspended yet.
+ //
+ debugLockHolderSuspended.Acquire();
+
+ // We should still be synchronizing, otherwise we would not have timed out.
+ _ASSERTE(m_debugger->IsSynchronizing());
+
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML:: sweeping the thread list.\n"));
+
+#ifdef _DEBUG
+ // If we fail to suspend the CLR, don't bother waiting for a BVT to time out;
+ // fire up an assert now.
+ // Threads::m_DebugWillSyncCount+1 is the number of outstanding threads.
+ // We're trying to suspend any thread w/ TS_DebugWillSync set.
+ if (dwSyncSpinCount++ > CorDB_MAX_SYNC_SPIN_COUNT)
+ {
+ _ASSERTE_MSG(false, "Timeout trying to suspend CLR for debugging. Possibly a deadlock.\n"\
+ "You can ignore this assert to continue waiting\n");
+ dwSyncSpinCount = 0;
+ }
+#endif
+
+ // Don't call Sweep if we're doing helper thread duty.
+ // If we're doing helper thread duty, then we already Suspended the CLR, and we already hold the TSL.
+ bool fSuspended;
+ {
+ // SweepThreadsForDebug() may call new!!! ARGG!!!
+ SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
+ fSuspended = g_pEEInterface->SweepThreadsForDebug(false);
+ }
+
+ if (fSuspended)
+ {
+ STRESS_LOG0(LF_CORDB, LL_INFO1000, "DRCT::ML:: wait set empty after sweep.\n");
+
+ // There are no more threads to wait for, so go ahead and send the sync complete event.
+ m_debugger->SuspendComplete();
+ dwWaitTimeout = INFINITE;
+
+ // Note: we hold the thread store lock now and debugger lock...
+
+ // We also hold debugger lock the whole time that Runtime is stopped. We will release the debugger lock
+ // when we receive the Continue event that resumes the runtime.
+
+ _ASSERTE(ThreadStore::HoldingThreadStore() || g_fProcessDetach);
+ }
+ else
+ {
+ // If we're doing helper thread duty, then we expect to have been suspended already.
+ // And so the sweep should always succeed.
+ STRESS_LOG0(LF_CORDB, LL_INFO1000, "DRCT::ML:: threads still syncing after sweep.\n");
+ debugLockHolderSuspended.Release();
+ }
+ // debugLockHolderSuspended does not go out of scope. It has to be either released explicitly on the line above or
+ // we intend to hold the lock until we hit the Continue event.
+
+ }
+ }
+
+ STRESS_LOG0(LF_CORDB, LL_INFO1000, "DRCT::ML:: Exiting.\n");
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Main loop of the temporary Helper thread. It waits for IPC events
+// and dishes them out to the Debugger object for processing.
+//
+// Notes:
+// When we enter here, we are holding debugger lock and thread store lock.
+// The debugger lock was SuppressRelease in DoHelperThreadDuty. The continue event
+// that we are waiting for will trigger the corresponding release.
+//
+// IMPORTANT!!! READ ME!!!!
+// This MainLoop is similar to the MainLoop function above but simplified to deal with only
+// some scenarios. So if you change something here, you should look at MainLoop to see if the
+// same change is required.
+//---------------------------------------------------------------------------------------
+void DebuggerRCThread::TemporaryHelperThreadMainLoop()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+
+
+ // If we come in here, this managed thread is trying to do helper thread duty.
+ // It should be holding the debugger lock!!!
+ //
+ PRECONDITION(m_debugger->ThreadHoldsLock());
+ PRECONDITION((ThreadStore::HoldingThreadStore()) || g_fProcessDetach);
+ PRECONDITION(ThisIsTempHelperThread());
+ }
+ CONTRACTL_END;
+
+ STRESS_LOG0(LF_CORDB, LL_INFO1000, "DRCT::THTML:: Doing helper thread duty, running main loop.\n");
+ // Anybody doing helper duty is in a can't-stop range, period.
+ // Our helper thread is already in a can't-stop range, so this is particularly useful for
+ // threads doing helper duty.
+ CantStopHolder cantStopHolder;
+
+ HANDLE rghWaitSet[DRCT_COUNT_FINAL];
+
+#ifdef _DEBUG
+ DWORD dwSyncSpinCount = 0;
+#endif
+
+ // We start out just listening on RSEA and the thread control event...
+ unsigned int cWaitCount = DRCT_COUNT_INITIAL;
+ DWORD dwWaitTimeout = INFINITE;
+ rghWaitSet[DRCT_CONTROL_EVENT] = m_threadControlEvent;
+ rghWaitSet[DRCT_FAVORAVAIL] = GetFavorAvailableEvent();
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif
+ rghWaitSet[DRCT_RSEA] = m_pDCB->m_rightSideEventAvailable;
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+ else
+ {
+ rghWaitSet[DRCT_RSEA] = g_pDbgTransport->GetIPCEventReadyEvent();
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ CONTRACT_VIOLATION(ThrowsViolation);// HndCreateHandle throws, and this loop is not backstopped by any EH
+
+ while (m_run)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::ML: waiting for event.\n"));
+
+ // Wait for an event from the Right Side.
+ DWORD dwWaitResult = WaitForMultipleObjectsEx(cWaitCount, rghWaitSet, FALSE, dwWaitTimeout, FALSE);
+
+ if (!m_run)
+ {
+ continue;
+ }
+
+
+ if (dwWaitResult == WAIT_OBJECT_0 + DRCT_DEBUGGER_EVENT)
+ {
+ // If the handle of the right side process is signaled, then we've lost our controlling debugger. We
+ // terminate this process immediately in such a case.
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::THTML: terminating this process. Right Side has exited.\n"));
+
+ TerminateProcess(GetCurrentProcess(), 0);
+ _ASSERTE(!"Should never reach this point.");
+ }
+ else if (dwWaitResult == WAIT_OBJECT_0 + DRCT_FAVORAVAIL)
+ {
+ // execute the callback set by DoFavor()
+ (*GetFavorFnPtr())(GetFavorData());
+
+ SetEvent(GetFavorReadEvent());
+ }
+ else if (dwWaitResult == WAIT_OBJECT_0 + DRCT_RSEA)
+ {
+ // @todo:
+ // We are only interested in dealing with Continue event here...
+ // Once we remove the HelperThread duty, this will just go away.
+ //
+ bool fWasContinue = HandleRSEA();
+
+ if (fWasContinue)
+ {
+ // If they called continue, then we must have released the TSL.
+ _ASSERTE(!ThreadStore::HoldingThreadStore() || g_fProcessDetach);
+
+#ifdef _DEBUG
+ // Always reset the syncSpinCount to 0 on a continue so that we have the maximum number of possible
+ // spins the next time we need to sync.
+ dwSyncSpinCount = 0;
+#endif
+
+ // HelperThread duty is finished. We have got a Continue message
+ goto LExit;
+ }
+ }
+ else if (dwWaitResult == WAIT_OBJECT_0 + DRCT_CONTROL_EVENT)
+ {
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::THTML:: straggler event set.\n"));
+
+ // Make sure that we're still synchronizing...
+ _ASSERTE(m_debugger->IsSynchronizing());
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::THTML:: dropping the timeout.\n"));
+
+ dwWaitTimeout = CorDB_SYNC_WAIT_TIMEOUT;
+
+ //
+ // Skip waiting the first time and just give it a go. Note: Implicit
+ // release of the lock, because we are leaving its scope.
+ //
+ goto LWaitTimedOut;
+ }
+ else if (dwWaitResult == WAIT_TIMEOUT)
+ {
+
+LWaitTimedOut:
+
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::THTML:: wait timed out.\n"));
+
+ // We should still be synchronizing, otherwise we would not have timed out.
+ _ASSERTE(m_debugger->IsSynchronizing());
+
+ LOG((LF_CORDB, LL_INFO1000, "DRCT::THTML:: sweeping the thread list.\n"));
+
+#ifdef _DEBUG
+ // If we fail to suspend the CLR, don't bother waiting for a BVT to time out;
+ // fire up an assert now.
+ // Threads::m_DebugWillSyncCount+1 is the number of outstanding threads.
+ // We're trying to suspend any thread w/ TS_DebugWillSync set.
+ if (dwSyncSpinCount++ > CorDB_MAX_SYNC_SPIN_COUNT)
+ {
+ _ASSERTE(false || !"Timeout trying to suspend CLR for debugging. Possibly a deadlock. "
+ "You can ignore this assert to continue waiting\n");
+ dwSyncSpinCount = 0;
+ }
+#endif
+
+ STRESS_LOG0(LF_CORDB, LL_INFO1000, "DRCT::THTML:: wait set empty after sweep.\n");
+
+            // We are holding the Debugger lock (see the SuppressRelease in DoHelperThreadDuty).
+            // The debugger lock will be released on the Continue event, at which point we will
+            // exit the loop.
+
+ // There are no more threads to wait for, so go ahead and send the sync complete event.
+ m_debugger->SuspendComplete();
+ dwWaitTimeout = INFINITE;
+
+ // Note: we hold the thread store lock now and debugger lock...
+ _ASSERTE(ThreadStore::HoldingThreadStore() || g_fProcessDetach);
+
+ }
+ }
+
+LExit:
+
+ STRESS_LOG0(LF_CORDB, LL_INFO1000, "DRCT::THTML:: Exiting.\n");
+}
+
+
+
+//
+// This is the thread proc for a remotely created helper thread. It simply
+// spins up a local helper thread via the RCThread object and lets this
+// remote thread exit.
+//
+/*static*/ DWORD WINAPI DebuggerRCThread::ThreadProcRemote(LPVOID)
+{
+    // We just create a local thread here and then we're outta here
+ WRAPPER_NO_CONTRACT;
+
+ ClrFlsSetThreadType(ThreadType_DbgHelper);
+
+ LOG((LF_CORDB, LL_EVERYTHING, "ThreadProcRemote called\n"));
+#ifdef _DEBUG
+ dbgOnly_IdentifySpecialEEThread();
+#endif
+
+    // This method can be called either by a local CreateThread or by a remote thread creation,
+    // so we must use the g_RCThread global to find the (unique!) this pointer;
+    // we cannot count on the parameter.
+
+ DebuggerRCThread* t = (DebuggerRCThread*)g_pRCThread;
+
+ // This remote thread is created by the debugger process
+ // and so its ACLs will reflect permissions for the user running
+ // the debugger. If this process is running in the context of a
+ // different user then this (the now running) process will not be
+ // able to do operations on that (remote) thread.
+ //
+    // To avoid this problem, if we are the remote thread, then
+    // we simply launch a new, local thread right here and let
+    // the remote thread die. This new thread is created the same
+    // way as always, and since it is created by this process,
+    // this process will be able to synchronize with it and so forth.
+
+ t->Start(); // this thread is remote, we must start a new thread
+
+ return 0;
+}
+
+//
+// This is the thread's real thread proc. It simply calls through to the
+// thread proc on the RCThread object.
+//
+/*static*/ DWORD WINAPI DebuggerRCThread::ThreadProcStatic(LPVOID)
+{
+ // We just wrap the instance method DebuggerRCThread::ThreadProc
+ WRAPPER_NO_CONTRACT;
+
+ BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO();
+
+ ClrFlsSetThreadType(ThreadType_DbgHelper);
+
+ LOG((LF_CORDB, LL_EVERYTHING, "ThreadProcStatic called\n"));
+
+#ifdef _DEBUG
+ dbgOnly_IdentifySpecialEEThread();
+#endif
+
+ // We commit the thread's entire stack to ensure we're robust in low memory conditions. If we can't commit the
+ // stack, then we can't let the CLR continue to function.
+ BOOL fSuccess = Thread::CommitThreadStack(NULL);
+
+ if (!fSuccess)
+ {
+ STRESS_LOG0(LF_GC, LL_ALWAYS, "Thread::CommitThreadStack failed.\n");
+ _ASSERTE(!"Thread::CommitThreadStack failed.");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_STACKOVERFLOW);
+ }
+
+ DebuggerRCThread* t = (DebuggerRCThread*)g_pRCThread;
+
+ t->ThreadProc(); // this thread is local, go and become the helper
+
+ END_SO_INTOLERANT_CODE;
+
+ return 0;
+}
+
+RCThreadLazyInit * DebuggerRCThread::GetLazyData()
+{
+ return g_pDebugger->GetRCThreadLazyData();
+}
+
+
+//
+// Start actually creates and starts the RC thread. It waits for the thread
+// to come up and perform initial synchronization with the Debugger
+// Interface before returning.
+//
+HRESULT DebuggerRCThread::Start(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ LOG((LF_CORDB, LL_EVERYTHING, "DebuggerRCThread::Start called...\n"));
+
+ DWORD helperThreadId;
+
+ if (m_thread != NULL)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DebuggerRCThread::Start declined to start another helper thread...\n"));
+ return S_OK;
+ }
+
+ Debugger::DebuggerLockHolder debugLockHolder(m_debugger);
+
+ if (m_thread == NULL)
+ {
+ // Create suspended so that we can sniff the tid before the thread actually runs.
+ // This may not be before the native thread-create event, but should be before everything else.
+ // Note: strange as it may seem, the Right Side depends on us
+ // using CreateThread to create the helper thread here. If you
+ // ever change this to some other thread creation routine, you
+ // need to update the logic in process.cpp where we discover the
+ // helper thread on CREATE_THREAD_DEBUG_EVENTs...
+ m_thread = CreateThread(NULL, 0, DebuggerRCThread::ThreadProcStatic,
+ NULL, CREATE_SUSPENDED, &helperThreadId );
+
+ if (m_thread == NULL)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DebuggerRCThread failed, err=%d\n", GetLastError()));
+ hr = HRESULT_FROM_GetLastError();
+
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DebuggerRCThread start was successful, id=%d\n", helperThreadId));
+ }
+
+ // This gets published immediately.
+ DebuggerIPCControlBlock* dcb = GetDCB();
+ PREFIX_ASSUME(dcb != NULL);
+ dcb->m_realHelperThreadId = helperThreadId;
+
+#ifdef _DEBUG
+ // Record the OS Thread ID for debugging purposes.
+        m_DbgHelperThreadOSTid = helperThreadId;
+#endif
+
+ if (m_thread != NULL)
+ {
+ ResumeThread(m_thread);
+ }
+
+ }
+
+ // unlock debugger lock is implied.
+
+ return hr;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Stop causes the RC thread to stop receiving events and exit.
+// It does not wait for it to exit before returning (hence "AsyncStop" instead of "Stop").
+//
+// Return Value:
+// Always S_OK at the moment.
+//
+//---------------------------------------------------------------------------------------
+HRESULT DebuggerRCThread::AsyncStop(void)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+
+        PRECONDITION(!ThisIsHelperThreadWorker());
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ m_run = FALSE;
+
+ // We need to get the helper thread out of its wait loop. So ping the thread-control event.
+ // (Don't ping RSEA since that event should be used only for IPC communication).
+ // Don't bother waiting for it to exit.
+ SetEvent(this->m_threadControlEvent);
+
+ return hr;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// This method checks whether the runtime offsets need to be (re)initialized and, if so, initializes them.
+//
+//---------------------------------------------------------------------------------------
+HRESULT inline DebuggerRCThread::EnsureRuntimeOffsetsInit(IpcTarget ipcTarget)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_NOTRIGGER;
+
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+ HRESULT hr = S_OK;
+
+ if (m_rgfInitRuntimeOffsets[ipcTarget] == true)
+ {
+ hr = SetupRuntimeOffsets(m_pDCB);
+ _ASSERTE(SUCCEEDED(hr)); // throws on failure
+
+ // RuntimeOffsets structure is setup.
+ m_rgfInitRuntimeOffsets[ipcTarget] = false;
+ }
+
+ return hr;
+}
+
+//
+// Call this function to tell the RC thread that we need the runtime offsets re-initialized at the next available time.
+//
+void DebuggerRCThread::NeedRuntimeOffsetsReInit(IpcTarget i)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_rgfInitRuntimeOffsets[i] = true;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Send a debug event to the Debugger. This may be either a notification
+// or a reply to a debugger query.
+//
+// Arguments:
+// iTarget - which connection. This must be IPC_TARGET_OUTOFPROC.
+//
+// Return Value:
+// S_OK on success
+//
+// Notes:
+// SendIPCEvent is used by the Debugger object to send IPC events to
+// the Debugger Interface. It waits for acknowledgement from the DI
+// before returning.
+//
+// This assumes that the event send buffer has been properly
+//    filled in. All it does is wake up the DI and let it know that it's
+//    safe to copy the event out of this process.
+//
+// This function may block indefinitely if the controlling debugger
+//    suddenly goes away.
+//
+// @dbgtodo inspection - this is all a nop around SendRawEvent!
+//
+//---------------------------------------------------------------------------------------
+HRESULT DebuggerRCThread::SendIPCEvent()
+{
+ CONTRACTL
+ {
+ SO_NOT_MAINLINE;
+ NOTHROW;
+ GC_NOTRIGGER; // duh, we're in preemptive..
+
+ if (ThisIsHelperThreadWorker())
+ {
+ // When we're stopped, the helper could actually be contracted as either mode-cooperative
+ // or mode-preemptive!
+ // If we're the helper thread, we're only sending events while we're stopped.
+ // Our callers will be mode-cooperative, so call this mode_cooperative to avoid a bunch
+            // of unnecessary contract violations.
+ MODE_COOPERATIVE;
+ }
+ else
+ {
+ // Managed threads sending debug events should always be in preemptive mode.
+ MODE_PREEMPTIVE;
+ }
+
+
+ PRECONDITION(ThisMaybeHelperThread());
+ }
+ CONTRACTL_END;
+
+
+ // one right side
+ _ASSERTE(m_debugger->ThreadHoldsLock());
+
+ HRESULT hr = S_OK;
+
+ // All the initialization is already done in code:DebuggerRCThread.Init,
+ // so we can just go ahead and send the event.
+
+ DebuggerIPCEvent* pManagedEvent = GetIPCEventSendBuffer();
+
+ STRESS_LOG2(LF_CORDB, LL_INFO1000, "D::SendIPCEvent %s to outofproc appD 0x%x,\n",
+ IPCENames::GetName(pManagedEvent->type),
+ VmPtrToCookie(pManagedEvent->vmAppDomain));
+
+ // increase the debug counter
+ DbgLog((DebuggerIPCEventType)(pManagedEvent->type & DB_IPCE_TYPE_MASK));
+
+ g_pDebugger->SendRawEvent(pManagedEvent);
+
+ return hr;
+}
+
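+// Illustrative caller pattern for SendIPCEvent (a minimal sketch inferred from the
+// Notes above, not a verbatim caller from this codebase):
+//
+//     DebuggerIPCEvent * pEvent = g_pRCThread->GetIPCEventSendBuffer();
+//     // ... fill in pEvent->type and the event-specific payload ...
+//     HRESULT hrSend = g_pRCThread->SendIPCEvent();   // wakes the Right Side
+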
+//
+// Return true if the helper thread is up & running
+//
+bool DebuggerRCThread::IsRCThreadReady()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (GetDCB() == NULL)
+ {
+ return false;
+ }
+
+ int idHelper = GetDCB()->m_helperThreadId;
+
+ // The simplest check. If the threadid isn't set, we're not ready.
+ if (idHelper == 0)
+ {
+ LOG((LF_CORDB, LL_EVERYTHING, "DRCT::IsReady - Helper not ready since DCB says id = 0.\n"));
+ return false;
+ }
+
+    // A more subtle check: it's possible the thread was up, but then
+    // a bad call to ExitProcess suddenly terminated the helper thread,
+    // leaving the threadid still non-0. So check the actual thread object
+ // and make sure it's still around.
+ int ret = WaitForSingleObject(m_thread, 0);
+ LOG((LF_CORDB, LL_EVERYTHING, "DRCT::IsReady - wait(0x%x)=%d, GetLastError() = %d\n", m_thread, ret, GetLastError()));
+
+ if (ret != WAIT_TIMEOUT)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+
+HRESULT DebuggerRCThread::ReDaclEvents(PSECURITY_DESCRIPTOR pSecurityDescriptor)
+{
+ LIMITED_METHOD_CONTRACT;
+
+#ifndef FEATURE_PAL
+ if (m_pDCB != NULL)
+ {
+ if (m_pDCB->m_rightSideEventAvailable)
+ {
+ if (SetKernelObjectSecurity(m_pDCB->m_rightSideEventAvailable,
+ DACL_SECURITY_INFORMATION,
+ pSecurityDescriptor) == 0)
+ {
+ // failed!
+ return HRESULT_FROM_GetLastError();
+ }
+ }
+ if (m_pDCB->m_rightSideEventRead)
+ {
+ if (SetKernelObjectSecurity(m_pDCB->m_rightSideEventRead,
+ DACL_SECURITY_INFORMATION,
+ pSecurityDescriptor) == 0)
+ {
+ // failed!
+ return HRESULT_FROM_GetLastError();
+ }
+ }
+ }
+#endif // FEATURE_PAL
+
+ return S_OK;
+}
+
+
+//
+// A normal thread may hit a stack overflow, so we want to do
+// any stack-intensive work on the Helper thread so that we don't
+// use up the grace memory.
+// Note that DoFavor will block until fp has been executed.
+//
+void DebuggerRCThread::DoFavor(FAVORCALLBACK fp, void * pData)
+{
+ CONTRACTL
+ {
+ SO_INTOLERANT;
+ NOTHROW;
+ GC_TRIGGERS;
+
+ PRECONDITION(!ThisIsHelperThreadWorker());
+
+#ifdef PREFAST
+ // Prefast issue
+ // error C2664: 'CHECK CheckPointer(TypeHandle,IsNullOK)' : cannot convert parameter 1 from
+ // 'DebuggerRCThread::FAVORCALLBACK' to 'TypeHandle'
+#else
+ PRECONDITION(CheckPointer(fp));
+ PRECONDITION(CheckPointer(pData, NULL_OK));
+#endif
+ }
+ CONTRACTL_END;
+
+    // We are only ever called on a managed thread.
+    //
+
+ // We'll have problems if another thread comes in and
+ // deletes the RCThread object on us while we're in this call.
+ if (IsRCThreadReady())
+ {
+ // If the helper thread calls this, we deadlock.
+ // (Since we wait on an event that only the helper thread sets)
+ _ASSERTE(GetRCThreadId() != GetCurrentThreadId());
+
+ // Only lock if we're waiting on the helper thread.
+ // This should be the only place the FavorLock is used.
+ // Note this is never called on the helper thread.
+ CrstHolder ch(GetFavorLock());
+
+ SetFavorFnPtr(fp, pData);
+
+ // Our main message loop operating on the Helper thread will
+ // pickup that event, call the fp, and set the Read event
+ SetEvent(GetFavorAvailableEvent());
+
+ LOG((LF_CORDB, LL_INFO10000, "DRCT::DF - Waiting on FavorReadEvent for favor 0x%08x\n", fp));
+
+ // Wait for either the FavorEventRead to be set (which means that the favor
+ // was executed by the helper thread) or the helper thread's handle (which means
+ // that the helper thread exited without doing the favor, so we should do it)
+ //
+ // Note we are assuming that there's only 2 ways the helper thread can exit:
+ // 1) Someone calls ::ExitProcess, killing all threads. That will kill us too, so we're "ok".
+ // 2) Someone calls Stop(), causing the helper to exit gracefully. That's ok too. The helper
+ // didn't execute the Favor (else the FREvent would have been set first) and so we can.
+ //
+ // Beware of problems:
+ // 1) If the helper can block, we may deadlock.
+        // 2) If the helper can exit magically (or if we change the Wait to include a timeout),
+ // the helper thread may have not executed the favor, partially executed the favor,
+ // or totally executed the favor but not yet signaled the FavorReadEvent. We don't
+ // know what it did, so we don't know what we can do; so we're in an unstable state.
+
+ const HANDLE waitset [] = { GetFavorReadEvent(), m_thread };
+
+        // The favor worker thread will require a transition to cooperative mode in order to complete its work, and we
+        // wait for the favor to complete before terminating the process. If there is a GC in progress, the favor thread
+        // will be blocked, and if the thread requesting the favor is in cooperative mode we'll deadlock; so we switch to
+        // preemptive mode before waiting for the favor to complete (see Dev11 72349).
+ GCX_PREEMP();
+
+ DWORD ret = WaitForMultipleObjectsEx(
+ NumItems(waitset),
+ waitset,
+ FALSE,
+ INFINITE,
+ FALSE
+ );
+
+ DWORD wn = (ret - WAIT_OBJECT_0);
+ if (wn == 0) // m_FavorEventRead
+ {
+ // Favor was executed, nothing to do here.
+ LOG((LF_CORDB, LL_INFO10000, "DRCT::DF - favor 0x%08x finished, ret = %d\n", fp, ret));
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DRCT::DF - lost helper thread during wait, "
+ "doing favor 0x%08x on current thread\n", fp));
+
+ // Since we have no timeout, we shouldn't be able to get an error on the wait,
+ // but just in case ...
+ _ASSERTE(ret != WAIT_FAILED);
+ _ASSERTE((wn == 1) && !"DoFavor - unexpected return from WFMO");
+
+ // Thread exited without doing favor, so execute it on our thread.
+ // If we're here because of a stack overflow, this may push us over the edge,
+ // but there's nothing else we can really do
+ (*fp)(pData);
+
+ ResetEvent(GetFavorAvailableEvent());
+ }
+
+ // m_fpFavor & m_pFavorData are meaningless now. We could set them
+ // to NULL, but we may as well leave them as is to leave a trail.
+
+ }
+ else
+ {
+ LOG((LF_CORDB, LL_INFO10000, "DRCT::DF - helper thread not ready, "
+ "doing favor 0x%08x on current thread\n", fp));
+ // If helper isn't ready yet, go ahead and execute the favor
+ // on the callee's space
+ (*fp)(pData);
+ }
+
+ // Drop a log message so that we know if we survived a stack overflow or not
+ LOG((LF_CORDB, LL_INFO10000, "DRCT::DF - Favor 0x%08x completed successfully\n", fp));
+}
+
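+// Minimal usage sketch for DoFavor (the callback and data names below are
+// hypothetical, chosen only to illustrate the FAVORCALLBACK shape):
+//
+//     static void StackIntensiveWork(void * pData)    // runs on the helper thread
+//     {
+//         // ... work that needs plenty of stack ...
+//     }
+//
+//     g_pRCThread->DoFavor(&StackIntensiveWork, pSomeData);   // blocks until done
+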
+
+//
+// SendIPCReply simply indicates to the Right Side that a reply to a
+// two-way event is ready to be read and that the last event sent from
+// the Right Side has been fully processed.
+//
+// NOTE: this assumes that the event receive buffer has been properly
+// filled in. All it does is wake up the DI and let it know that it's
+// safe to copy the event out of this process.
+//
+HRESULT DebuggerRCThread::SendIPCReply()
+{
+ HRESULT hr = S_OK;
+
+#ifdef LOGGING
+ DebuggerIPCEvent* event = GetIPCEventReceiveBuffer();
+
+ LOG((LF_CORDB, LL_INFO10000, "D::SIPCR: replying with %s.\n",
+ IPCENames::GetName(event->type)));
+#endif
+
+#if defined(FEATURE_DBGIPC_TRANSPORT_VM)
+ DWORD useTransport = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_DbgUseTransport);
+ if(!useTransport)
+ {
+#endif
+ BOOL succ = SetEvent(m_pDCB->m_rightSideEventRead);
+ if (!succ)
+ {
+ hr = CORDBDebuggerSetUnrecoverableWin32Error(m_debugger, 0, false);
+ }
+#ifdef FEATURE_DBGIPC_TRANSPORT_VM
+ }
+ else
+ {
+ hr = g_pDbgTransport->SendEvent(GetIPCEventReceiveBuffer());
+ if (FAILED(hr))
+ {
+ m_debugger->UnrecoverableError(hr,
+ 0,
+ __FILE__,
+ __LINE__,
+ false);
+ }
+ }
+#endif // FEATURE_DBGIPC_TRANSPORT_VM
+
+ return hr;
+}
+
+//
+// EarlyHelperThreadDeath handles the case where the helper
+// thread has been ripped out from underneath of us by
+// ExitProcess or TerminateProcess. These calls are bad, whacking
+// all threads except the caller in the process. This can happen, for
+// instance, when an app calls ExitProcess. All threads are whacked,
+// the main thread calls all DLL main's, and the EE starts shutting
+// down in its DLL main with the helper thread terminated.
+//
+void DebuggerRCThread::EarlyHelperThreadDeath(void)
+{
+ LOG((LF_CORDB, LL_INFO10000, "DRCT::EHTD\n"));
+
+ // If we ever spun up a thread...
+ if (m_thread != NULL && m_pDCB)
+ {
+ Debugger::DebuggerLockHolder debugLockHolder(m_debugger);
+
+ m_pDCB->m_helperThreadId = 0;
+
+ LOG((LF_CORDB, LL_INFO10000, "DRCT::EHTD helperThreadId\n"));
+ // dbgLockHolder goes out of scope - implicit Release
+ }
+}
+
diff --git a/src/debug/ee/shared.cpp b/src/debug/ee/shared.cpp
new file mode 100644
index 0000000000..6d7af86893
--- /dev/null
+++ b/src/debug/ee/shared.cpp
@@ -0,0 +1,16 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+
+//
+
+/*
+ *
+ * Common source file for all files in ..\shared for compiling into the left-side
+ *
+ */
+#include "stdafx.h"
+
+#include "../shared/utils.cpp"
+#include "../shared/dbgtransportsession.cpp"
diff --git a/src/debug/ee/stdafx.cpp b/src/debug/ee/stdafx.cpp
new file mode 100644
index 0000000000..184cc8de11
--- /dev/null
+++ b/src/debug/ee/stdafx.cpp
@@ -0,0 +1,13 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: stdafx.cpp
+//
+
+//
+// Host for precompiled headers.
+//
+//*****************************************************************************
+#include "stdafx.h" // Precompiled header key.
diff --git a/src/debug/ee/stdafx.h b/src/debug/ee/stdafx.h
new file mode 100644
index 0000000000..06df0ec7ba
--- /dev/null
+++ b/src/debug/ee/stdafx.h
@@ -0,0 +1,40 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: stdafx.h
+//
+
+//
+//*****************************************************************************
+#include <stdint.h>
+#include <wchar.h>
+#include <stdio.h>
+
+#include <windows.h>
+#if !defined(FEATURE_CORECLR)
+#undef GetCurrentTime // works around a macro def conflict of GetCurrentTime
+#include <windows.ui.xaml.h>
+#endif // !FEATURE_CORECLR
+
+#include <switches.h>
+#include <winwrap.h>
+
+#ifdef DACCESS_COMPILE
+#include <specstrings.h>
+#endif
+
+#include <util.hpp>
+
+#include <dbgtargetcontext.h>
+
+#include <cordbpriv.h>
+#include <dbgipcevents.h>
+#include "debugger.h"
+#include "walker.h"
+#include "controller.h"
+#include "frameinfo.h"
+#include <corerror.h>
+#include "../inc/common.h"
+
diff --git a/src/debug/ee/walker.h b/src/debug/ee/walker.h
new file mode 100644
index 0000000000..c3ab624d8c
--- /dev/null
+++ b/src/debug/ee/walker.h
@@ -0,0 +1,237 @@
+//
+// Copyright (c) Microsoft. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+//
+//*****************************************************************************
+// File: walker.h
+//
+
+//
+// Debugger code stream analysis routines
+//
+//*****************************************************************************
+
+#ifndef WALKER_H_
+#define WALKER_H_
+
+/* ========================================================================= */
+
+/* ------------------------------------------------------------------------- *
+ * Constants
+ * ------------------------------------------------------------------------- */
+
+enum WALK_TYPE
+{
+ WALK_NEXT,
+ WALK_BRANCH,
+ WALK_COND_BRANCH,
+ WALK_CALL,
+ WALK_RETURN,
+ WALK_BREAK,
+ WALK_THROW,
+ WALK_META,
+ WALK_UNKNOWN
+};
+
+// struct holding information for the instruction being skipped over
+struct InstructionAttribute
+{
+ bool m_fIsCall; // is this a call instruction?
+ bool m_fIsCond; // is this a conditional jump?
+ bool m_fIsAbsBranch; // is this an absolute branch (either a call or a jump)?
+ bool m_fIsRelBranch; // is this a relative branch (either a call or a jump)?
+ bool m_fIsWrite; // does the instruction write to an address?
+
+ DWORD m_cbInstr; // the size of the instruction
+ DWORD m_cbDisp; // the size of the displacement
+ DWORD m_dwOffsetToDisp; // the offset from the beginning of the instruction
+ // to the beginning of the displacement
+ BYTE m_cOperandSize; // the size of the operand
+
+ void Reset()
+ {
+ m_fIsCall = false;
+ m_fIsCond = false;
+ m_fIsAbsBranch = false;
+ m_fIsRelBranch = false;
+ m_fIsWrite = false;
+
+ m_cbInstr = 0;
+ m_cbDisp = 0;
+ m_dwOffsetToDisp = 0;
+ m_cOperandSize = 0;
+ }
+};
+
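+// Minimal sketch (hypothetical local names) of filling in an InstructionAttribute
+// via the DecodeInstructionForPatchSkip helper declared further down in this header:
+//
+//     InstructionAttribute attrib;
+//     NativeWalker::DecodeInstructionForPatchSkip(pPatchedAddress, &attrib);
+//     if (attrib.m_fIsCall)
+//     {
+//         // the patched opcode is a call; emulate/skip it accordingly
+//     }
+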
+/* ------------------------------------------------------------------------- *
+ * Classes
+ * ------------------------------------------------------------------------- */
+
+class Walker
+{
+protected:
+ Walker()
+ : m_type(WALK_UNKNOWN), m_registers(NULL), m_ip(0), m_skipIP(0), m_nextIP(0), m_isAbsoluteBranch(false)
+ {LIMITED_METHOD_CONTRACT; }
+
+public:
+
+ virtual void Init(const BYTE *ip, REGDISPLAY *pregisters)
+ {
+ PREFIX_ASSUME(pregisters != NULL);
+ _ASSERTE(GetControlPC(pregisters) == (PCODE)ip);
+
+ m_registers = pregisters;
+ SetIP(ip);
+ }
+
+ const BYTE *GetIP()
+ { return m_ip; }
+
+ WALK_TYPE GetOpcodeWalkType()
+ { return m_type; }
+
+ const BYTE *GetSkipIP()
+ { return m_skipIP; }
+
+ bool IsAbsoluteBranch()
+ { return m_isAbsoluteBranch; }
+
+ const BYTE *GetNextIP()
+ { return m_nextIP; }
+
+ // We don't currently keep the registers up to date
+ // <TODO> Check if it really works on IA64. </TODO>
+ virtual void Next() { m_registers = NULL; SetIP(m_nextIP); }
+ virtual void Skip() { m_registers = NULL; SetIP(m_skipIP); }
+
+ // Decode the instruction
+ virtual void Decode() = 0;
+
+private:
+ void SetIP(const BYTE *ip)
+ { m_ip = ip; Decode(); }
+
+protected:
+ WALK_TYPE m_type; // Type of instructions
+ REGDISPLAY *m_registers; // Registers
+ const BYTE *m_ip; // Current IP
+ const BYTE *m_skipIP; // IP if we skip the instruction
+ const BYTE *m_nextIP; // IP if the instruction is taken
+    bool                m_isAbsoluteBranch;     // Is it an absolute branch or not
+};
+
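+// Minimal sketch of how a stepper might drive a walker (the variables here are
+// hypothetical; only the member functions and WALK_* values come from this header):
+//
+//     NativeWalker walker;
+//     walker.Init(pCurrentIP, pRegDisplay);
+//     if (walker.GetOpcodeWalkType() == WALK_CALL)
+//     {
+//         // to step over the call, resume at the instruction following it
+//         const BYTE * pAfterCall = walker.GetSkipIP();
+//     }
+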
+#ifdef _TARGET_X86_
+
+class NativeWalker : public Walker
+{
+public:
+ void Init(const BYTE *ip, REGDISPLAY *pregisters)
+ {
+ m_opcode = 0;
+ Walker::Init(ip, pregisters);
+ }
+
+ DWORD GetOpcode()
+ { return m_opcode; }
+/*
+ void SetRegDisplay(REGDISPLAY *registers)
+ { m_registers = registers; }
+*/
+ REGDISPLAY *GetRegDisplay()
+ { return m_registers; }
+
+ void Decode();
+ void DecodeModRM(BYTE mod, BYTE reg, BYTE rm, const BYTE *ip);
+ static void DecodeInstructionForPatchSkip(const BYTE *address, InstructionAttribute * pInstrAttrib);
+
+private:
+ DWORD GetRegisterValue(int registerNumber);
+
+ DWORD m_opcode; // Current instruction or opcode
+};
+
+#elif defined (_TARGET_ARM_)
+
+class NativeWalker : public Walker
+{
+public:
+ void Init(const BYTE *ip, REGDISPLAY *pregisters)
+ {
+ Walker::Init(ip, pregisters);
+ }
+
+ void Decode();
+
+private:
+ bool ConditionHolds(DWORD cond);
+ DWORD GetReg(DWORD reg);
+};
+
+#elif defined(_TARGET_AMD64_)
+
+class NativeWalker : public Walker
+{
+public:
+ void Init(const BYTE *ip, REGDISPLAY *pregisters)
+ {
+ m_opcode = 0;
+ Walker::Init(ip, pregisters);
+ }
+
+ DWORD GetOpcode()
+ { return m_opcode; }
+/*
+ void SetRegDisplay(REGDISPLAY *registers)
+ { m_registers = registers; }
+*/
+ REGDISPLAY *GetRegDisplay()
+ { return m_registers; }
+
+ void Decode();
+ void DecodeModRM(BYTE mod, BYTE reg, BYTE rm, const BYTE *ip);
+ static void DecodeInstructionForPatchSkip(const BYTE *address, InstructionAttribute * pInstrAttrib);
+
+private:
+ UINT64 GetRegisterValue(int registerNumber);
+
+ DWORD m_opcode; // Current instruction or opcode
+};
+
+#else
+PORTABILITY_WARNING("NativeWalker not implemented on this platform");
+class NativeWalker : public Walker
+{
+public:
+ void Init(const BYTE *ip, REGDISPLAY *pregisters)
+ {
+ m_opcode = 0;
+ Walker::Init(ip, pregisters);
+ }
+ DWORD GetOpcode()
+ { return m_opcode; }
+ void Next()
+ { Walker::Next(); }
+ void Skip()
+ { Walker::Skip(); }
+
+ void Decode()
+ {
+ PORTABILITY_ASSERT("NativeWalker not implemented on this platform");
+ m_type = WALK_UNKNOWN;
+ m_skipIP = m_ip++;
+ m_nextIP = m_ip++;
+ }
+
+ static void DecodeInstructionForPatchSkip(const BYTE *address, InstructionAttribute * pInstrAttrib)
+ {
+ PORTABILITY_ASSERT("NativeWalker not implemented on this platform");
+
+ }
+
+private:
+ DWORD m_opcode; // Current instruction or opcode
+};
+#endif
+
+#endif // WALKER_H_
diff --git a/src/debug/ee/wks/.gitmirror b/src/debug/ee/wks/.gitmirror
new file mode 100644
index 0000000000..f507630f94
--- /dev/null
+++ b/src/debug/ee/wks/.gitmirror
@@ -0,0 +1 @@
+Only contents of this folder, excluding subfolders, will be mirrored by the Git-TFS Mirror. \ No newline at end of file
diff --git a/src/debug/ee/wks/CMakeLists.txt b/src/debug/ee/wks/CMakeLists.txt
new file mode 100644
index 0000000000..82b4d7404f
--- /dev/null
+++ b/src/debug/ee/wks/CMakeLists.txt
@@ -0,0 +1,29 @@
+
+if (WIN32)
+
+if (IS_64BIT_BUILD EQUAL 1)
+ FIND_PROGRAM(ASM_COMPILER ml64.exe)
+else (IS_64BIT_BUILD EQUAL 1)
+ FIND_PROGRAM(ASM_COMPILER ml.exe)
+endif (IS_64BIT_BUILD EQUAL 1)
+
+get_include_directories(ASM_INCLUDE_DIRECTORIES)
+get_compile_definitions(ASM_DEFINITIONS)
+
+# Need to compile the asm file using a custom command because include directories are not provided to the asm compiler
+add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/dbghelpers.obj
+ COMMAND ${ASM_COMPILER} ${ASM_INCLUDE_DIRECTORIES} ${ASM_DEFINITIONS} /c /Zi /Fo${CMAKE_CURRENT_BINARY_DIR}/dbghelpers.obj /W3 /errorReport:prompt /Ta${CORDBEE_DIR}/${ARCH_SOURCES_DIR}/dbghelpers.asm
+ DEPENDS ${CORDBEE_DIR}/${ARCH_SOURCES_DIR}/dbghelpers.asm
+ COMMENT "Compiling dbghelpers.asm")
+
+# Mark the obj as a source file that does not require compilation
+set_source_files_properties(${CMAKE_CURRENT_BINARY_DIR}/dbghelpers.obj PROPERTIES EXTERNAL_OBJECT TRUE)
+
+add_library(cordbee_wks ${CORDBEE_SOURCES_WKS} ${CMAKE_CURRENT_BINARY_DIR}/dbghelpers.obj)
+
+else (WIN32)
+
+add_compile_options(-fPIC)
+add_library(cordbee_wks ${CORDBEE_SOURCES_WKS} ../amd64/dbghelpers.S)
+
+endif (WIN32)
diff --git a/src/debug/ee/wks/wks.nativeproj b/src/debug/ee/wks/wks.nativeproj
new file mode 100644
index 0000000000..7386b5d119
--- /dev/null
+++ b/src/debug/ee/wks/wks.nativeproj
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="dogfood">
+ <!--*****************************************************-->
+ <!--This MSBuild project file was automatically generated-->
+ <!--from the original SOURCES/DIRS file by the KBC tool.-->
+ <!--*****************************************************-->
+ <!--Import the settings-->
+ <!--Leaf project Properties-->
+ <PropertyGroup Label="Globals">
+ <SccProjectName>SAK</SccProjectName>
+ <SccAuxPath>SAK</SccAuxPath>
+ <SccLocalPath>SAK</SccLocalPath>
+ <SccProvider>SAK</SccProvider>
+ </PropertyGroup>
+ <PropertyGroup>
+ <BuildCoreBinaries>true</BuildCoreBinaries>
+ <BuildSysBinaries>true</BuildSysBinaries>
+ <OutputPath>$(ClrLibDest)</OutputPath>
+ <OutputName>cordbee_wks</OutputName>
+ <TargetType>LIBRARY</TargetType>
+ <UserAssembleAmd64IncludePath>
+ $(UserAssembleAmd64IncludePath);
+ ..\..\..\vm\AMD64;
+ </UserAssembleAmd64IncludePath>
+ </PropertyGroup>
+ <!--Leaf Project Items-->
+ <Import Project="..\EE.props" />
+ <ItemGroup>
+ <CppCompile Include="@(SourcesNodac)" />
+ <CppCompile Include="@(I386Sources)" />
+ <CppCompile Include="@(Amd64Sources)" />
+ <CppCompile Include="@(ArmSources)" />
+ <CppCompile Include="@(Arm64Sources)" />
+ <PreprocessAssembleArm Condition="'$(BuildArchitecture)' == 'arm'" Include="..\arm\dbghelpers.asm" />
+ <AssembleArm Condition="'$(BuildArchitecture)' == 'arm'" Include="$(IntermediateOutputDirectory)\dbghelpers.i" />
+ <Assemble386 Condition="'$(BuildArchitecture)' == 'i386'" Include="..\i386\dbghelpers.asm" />
+ <AssembleAmd64 Condition="'$(BuildArchitecture)' == 'amd64'" Include="..\amd64\dbghelpers.asm" />
+ </ItemGroup>
+ <!--Import the targets-->
+ <Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.targets" />
+</Project>