-rw-r--r--  Documentation/project-docs/glossary.md | 7
-rw-r--r--  UpdateDependencies.ps1 | 56
-rwxr-xr-x  build-packages.sh | 9
-rwxr-xr-x  cross/build-rootfs.sh | 4
-rw-r--r--  dir.props | 2
-rwxr-xr-x  netci.groovy | 12
-rw-r--r--  src/ToolBox/SOS/Strike/disasm.cpp | 12
-rw-r--r--  src/ToolBox/SOS/Strike/disasm.h | 8
-rw-r--r--  src/ToolBox/SOS/Strike/disasmARM.cpp | 6
-rw-r--r--  src/ToolBox/SOS/Strike/disasmARM64.cpp | 6
-rw-r--r--  src/ToolBox/SOS/Strike/exts.h | 5
-rw-r--r--  src/ToolBox/SOS/Strike/strike.cpp | 8
-rw-r--r--  src/debug/daccess/daccess.cpp | 6
-rw-r--r--  src/debug/daccess/enummem.cpp | 5
-rw-r--r--  src/debug/daccess/nidump.cpp | 13
-rw-r--r--  src/debug/daccess/request.cpp | 2
-rw-r--r--  src/gc/gc.cpp | 19
-rw-r--r--  src/gcdump/gcdump.cpp | 11
-rw-r--r--  src/gcdump/gcdumpnonx86.cpp | 15
-rw-r--r--  src/gcinfo/CMakeLists.txt | 1
-rw-r--r--  src/gcinfo/dbggcinfoencoder.cpp | 981
-rw-r--r--  src/gcinfo/gcinfo.settings.targets | 1
-rw-r--r--  src/gcinfo/gcinfodumper.cpp | 14
-rw-r--r--  src/gcinfo/gcinfoencoder.cpp | 84
-rw-r--r--  src/inc/dbggcinfodecoder.h | 343
-rw-r--r--  src/inc/dbggcinfoencoder.h | 469
-rw-r--r--  src/inc/eetwain.h | 10
-rw-r--r--  src/inc/gcdecoder.cpp | 2
-rw-r--r--  src/inc/gcdump.h | 15
-rw-r--r--  src/inc/gcinfo.h | 249
-rw-r--r--  src/inc/gcinfodecoder.h | 29
-rw-r--r--  src/inc/gcinfodumper.h | 4
-rw-r--r--  src/inc/gcinfoencoder.h | 8
-rw-r--r--  src/inc/gcinfotypes.h | 270
-rwxr-xr-x  src/jit/codegencommon.cpp | 23
-rw-r--r--  src/jit/codegeninterface.h | 3
-rwxr-xr-x  src/jit/codegenxarch.cpp | 12
-rw-r--r--  src/jit/compiler.h | 17
-rw-r--r--  src/jit/emitxarch.cpp | 43
-rw-r--r--  src/jit/flowgraph.cpp | 16
-rw-r--r--  src/jit/gcencode.cpp | 8
-rw-r--r--  src/jit/gentree.cpp | 250
-rw-r--r--  src/jit/gentree.h | 69
-rw-r--r--  src/jit/gtstructs.h | 2
-rw-r--r--  src/jit/jitgcinfo.h | 4
-rw-r--r--  src/jit/lower.h | 58
-rw-r--r--  src/jit/lowerxarch.cpp | 258
-rw-r--r--  src/jit/lsra.cpp | 84
-rw-r--r--  src/jit/lsra.h | 21
-rwxr-xr-x  src/jit/morph.cpp | 66
-rw-r--r--  src/jit/optcse.cpp | 47
-rw-r--r--  src/pal/src/exception/seh-unwind.cpp | 3
-rw-r--r--  src/pal/src/exception/seh.cpp | 32
-rw-r--r--  src/vm/CMakeLists.txt | 2
-rw-r--r--  src/vm/codeman.cpp | 50
-rw-r--r--  src/vm/codeman.h | 34
-rw-r--r--  src/vm/crossgen/CMakeLists.txt | 1
-rw-r--r--  src/vm/crossgen/wks_crossgen.nativeproj | 1
-rw-r--r--  src/vm/dac/dacwks.targets | 1
-rw-r--r--  src/vm/dbggcinfodecoder.cpp | 932
-rw-r--r--  src/vm/debughelp.cpp | 10
-rw-r--r--  src/vm/eedbginterfaceimpl.cpp | 6
-rw-r--r--  src/vm/eetwain.cpp | 63
-rw-r--r--  src/vm/gccover.cpp | 20
-rw-r--r--  src/vm/gccover.h | 2
-rw-r--r--  src/vm/gcenv.ee.cpp | 10
-rw-r--r--  src/vm/gcinfodecoder.cpp | 61
-rw-r--r--  src/vm/stackwalk.h | 7
-rw-r--r--  src/vm/wks/wks.targets | 1
-rw-r--r--  tests/buildtest.cmd | 46
-rw-r--r--  tests/dir.props | 20
-rw-r--r--  tests/runtest.proj | 4
-rwxr-xr-x  tests/scripts/arm32_ci_script.sh | 10
73 files changed, 1295 insertions, 3688 deletions
diff --git a/Documentation/project-docs/glossary.md b/Documentation/project-docs/glossary.md
index 176da1da74..19609049a7 100644
--- a/Documentation/project-docs/glossary.md
+++ b/Documentation/project-docs/glossary.md
@@ -5,13 +5,18 @@ This glossary defines terms, both common and more niche, that are important to u
As much as possible, we should link to the most authoritative and recent source of information for a term. That approach should be the most helpful for people who want to learn more about a topic.
-* CLR: Common Language Runtime
+* BOTR: Book of the Runtime.
+* CLR: Common Language Runtime.
* COMPlus: An early name for the .NET platform, back when it was envisioned as a successor to the COM platform (hence, "COM+"). Used in various places in the CLR infrastructure, most prominently as a common prefix for the names of internal configuration settings. Note that this is different from the product that eventually ended up being named [COM+](https://msdn.microsoft.com/en-us/library/windows/desktop/ms685978.aspx).
* COR: [Common Object Runtime](http://www.danielmoth.com/Blog/mscorlibdll.aspx). The name of .NET before it was named .NET.
* DAC: Data Access Component. An abstraction layer over the internal structures in the runtime.
* EE: Execution Engine.
+* GC: [Garbage Collector](https://github.com/dotnet/coreclr/blob/master/Documentation/botr/garbage-collection.md).
+* JIT: [Just-in-Time](https://github.com/dotnet/coreclr/blob/master/Documentation/botr/ryujit-overview.md) compiler. RyuJIT is the code name for the next-generation just-in-time (JIT) compiler for the .NET runtime.
* LCG: Lightweight Code Generation. An early name for [dynamic methods](https://github.com/dotnet/coreclr/blob/master/src/mscorlib/src/System/Reflection/Emit/DynamicMethod.cs).
+* NGen: Native Image Generator.
* PAL: [Platform Adaptation Layer](http://archive.oreilly.com/pub/a/dotnet/2002/03/04/rotor.html). Provides an abstraction layer between the runtime and the operating system
+* PE: Portable Executable.
* ProjectN: Codename for the first version of [.NET Native for UWP](https://msdn.microsoft.com/en-us/vstudio/dotnetnative.aspx).
* ReadyToRun: A flavor of native images - command line switch of [crossgen](https://github.com/dotnet/coreclr/blob/master/src/tools/crossgen/crossgen.cpp). We do plan to add docs as part of [#227](https://github.com/dotnet/coreclr/issues/227).
* Redhawk: Codename for experimental minimal managed code runtime that evolved into [CoreRT](https://github.com/dotnet/corert/).
diff --git a/UpdateDependencies.ps1 b/UpdateDependencies.ps1
index 102dd4ee8c..8fee1b9bef 100644
--- a/UpdateDependencies.ps1
+++ b/UpdateDependencies.ps1
@@ -22,6 +22,16 @@ param(
$LatestVersion = Invoke-WebRequest $VersionFileUrl -UseBasicParsing
$LatestVersion = $LatestVersion.ToString().Trim()
+if ($DirPropsVersionElements -contains 'CoreClrExpectedPrerelease')
+{
+    # Also get the list of all package versions, from a URL derived from the given prerelease version URL.
+ $LatestPackagesListUrl = $VersionFileUrl -Replace 'Latest.txt', 'Latest_Packages.txt'
+ $LatestPackagesList = Invoke-WebRequest $LatestPackagesListUrl -UseBasicParsing
+ $LatestCoreCLRPackage = $LatestPackagesList -split "`n" | ?{ $_.StartsWith('Microsoft.NETCore.Runtime.CoreCLR') }
+ $LatestCoreCLRVersion = ($LatestCoreCLRPackage -split ' ')[1].Trim()
+}
+
+
# Make a nicely formatted string of the dir props version elements. Short names, joined by commas.
$DirPropsVersionNames = ($DirPropsVersionElements | %{ $_ -replace 'ExpectedPrerelease', '' }) -join ', '
@@ -34,22 +44,41 @@ function UpdateValidDependencyVersionsFile
return $false
}
- $DirPropsPath = "$PSScriptRoot\dir.props"
-
- $DirPropsContent = Get-Content $DirPropsPath | % {
- $line = $_
- $DirPropsVersionElements | % {
- $line = $line -replace `
- "<$_>.*</$_>", `
- "<$_>$LatestVersion</$_>"
- }
- $line
+ $DirPropsPaths = @("$PSScriptRoot\dir.props", "$PSScriptRoot\tests\dir.props")
+
+ $DirPropsPaths | %{
+ $DirPropsContent = Get-Content $_ | %{
+ $line = $_
+
+ $DirPropsVersionElements | %{
+ $line = $line -replace `
+ "<$_>.*</$_>", `
+ "<$_>$LatestVersion</$_>"
+ }
+
+ if ($LatestCoreCLRVersion)
+ {
+ $line = $line -replace `
+ "<CoreClrPackageVersion>.*<", `
+ "<CoreClrPackageVersion>$LatestCoreCLRVersion<"
+ }
+
+ $line
+ }
+ Set-Content $_ $DirPropsContent
}
- Set-Content $DirPropsPath $DirPropsContent
return $true
}
+# Updates all the project.json files that contain out-of-date version numbers
+function RunUpdatePackageDependencyVersions
+{
+ cmd /c $PSScriptRoot\tests\buildtest.cmd updateinvalidpackages | Out-Host
+
+ return $LASTEXITCODE -eq 0
+}
+
# Creates a Pull Request for the updated version numbers
function CreatePullRequest
{
@@ -113,6 +142,11 @@ if (!(UpdateValidDependencyVersionsFile))
Exit -1
}
+if (!(RunUpdatePackageDependencyVersions))
+{
+ Exit -1
+}
+
if (!(CreatePullRequest))
{
Exit -1
diff --git a/build-packages.sh b/build-packages.sh
index 7ad4e17677..238958eefd 100755
--- a/build-packages.sh
+++ b/build-packages.sh
@@ -156,15 +156,6 @@ if [ $? -ne 0 ]; then
exit 1
fi
- # Build the TargetingPack package
- $__ProjectRoot/Tools/dotnetcli/dotnet "$__MSBuildPath" /nologo "$__ProjectRoot/src\.nuget\Microsoft.TargetingPack.Private.CoreCLR\Microsoft.TargetingPack.Private.CoreCLR.pkgproj" /verbosity:minimal "/fileloggerparameters:Verbosity=normal;LogFile=$binclashlog" /t:Build /p:__BuildOS=$__BuildOS /p:__BuildArch=$__BuildArch /p:__BuildType=$__BuildType /p:__IntermediatesDir=$__IntermediatesDir /p:BuildNugetPackage=false /p:UseSharedCompilation=false
-
-if [ $? -ne 0 ]; then
- echo -e "\nAn error occurred. Aborting build-packages.sh ." >> $build_packages_log
- echo "ERROR: An error occurred while building packages, see $build_packages_log for more details."
- exit 1
-fi
-
echo "Done building packages."
echo -e "\nDone building packages." >> $build_packages_log
exit 0
diff --git a/cross/build-rootfs.sh b/cross/build-rootfs.sh
index dc093b8675..b786420405 100755
--- a/cross/build-rootfs.sh
+++ b/cross/build-rootfs.sh
@@ -52,12 +52,12 @@ for i in "$@"
__UbuntuCodeName=jessie
;;
vivid)
- if [ __UbuntuCodeName != "jessie" ]; then
+ if [ "$__UbuntuCodeName" != "jessie" ]; then
__UbuntuCodeName=vivid
fi
;;
wily)
- if [ __UbuntuCodeName != "jessie" ]; then
+ if [ "$__UbuntuCodeName" != "jessie" ]; then
__UbuntuCodeName=wily
fi
;;
diff --git a/dir.props b/dir.props
index 7607fb433d..f1cc63cfbc 100644
--- a/dir.props
+++ b/dir.props
@@ -153,7 +153,7 @@
<ProjectUrl>https://dot.net</ProjectUrl>
<!-- PreReleaseSuffix for packages published from closed build (e.g. CoreCLR for Arm32, APISet, etc) -->
- <ExternalExpectedPrerelease>beta-24319-00</ExternalExpectedPrerelease>
+ <ExternalExpectedPrerelease>beta-24320-00</ExternalExpectedPrerelease>
<!-- On Windows, MSbuild still runs against Desktop FX while it runs on .NET Core on non-Windows. this requires
pulling in different packaging dependencies.
diff --git a/netci.groovy b/netci.groovy
index 845fa6133a..a34ccedd8e 100755
--- a/netci.groovy
+++ b/netci.groovy
@@ -1,7 +1,6 @@
// Import the utility functionality.
-import jobs.generation.Utilities;
-import jobs.generation.JobReport;
+import jobs.generation.*
// The input project name (e.g. dotnet/coreclr)
def project = GithubProject
@@ -561,7 +560,7 @@ def static addTriggers(def job, def branch, def isPR, def architecture, def os,
case 'Ubuntu16.04':
assert !isFlowJob
assert scenario == 'default'
- Utilities.addGithubPRTriggerForBranch(job, branch, "${os} ${architecture} ${configuration} Build", '(?i).*test\\W+${os}\\W+.*')
+ Utilities.addGithubPRTriggerForBranch(job, branch, "${os} ${architecture} ${configuration} Build", "(?i).*test\\W+${os}\\W+.*")
break
case 'Ubuntu':
case 'OSX':
@@ -2253,6 +2252,13 @@ combinedScenarios.each { scenario ->
addEmailPublisher(newJob, 'clrcoverage@microsoft.com')
}
+ // Experimental: If on Ubuntu 14.04, then attempt to pull in crash dump links
+ if (os in ['Ubuntu']) {
+ SummaryBuilder summaries = new SummaryBuilder()
+ summaries.addLinksSummaryFromFile('Crash dumps from this run:', 'dumplings.txt')
+ summaries.emit(newJob)
+ }
+
setMachineAffinity(newJob, os, architecture)
Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
// Set timeouts to 240.
diff --git a/src/ToolBox/SOS/Strike/disasm.cpp b/src/ToolBox/SOS/Strike/disasm.cpp
index 097f4cd14c..e141f8038f 100644
--- a/src/ToolBox/SOS/Strike/disasm.cpp
+++ b/src/ToolBox/SOS/Strike/disasm.cpp
@@ -9,6 +9,7 @@
// ==--==
#include "strike.h"
+#include "gcinfo.h"
#include "util.h"
#include <dbghelp.h>
#include <limits.h>
@@ -1058,10 +1059,11 @@ void PrintNothing (const char *fmt, ...)
///
/// Dump X86 GCInfo header and table
///
-void X86Machine::DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const
+void X86Machine::DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const
{
X86GCDump::InfoHdr header;
- X86GCDump::GCDump gcDump(encBytes, 5, true);
+ X86GCDump::GCDump gcDump(gcInfoToken.Version, encBytes, 5, true);
+ BYTE* pTable = dac_cast<PTR_BYTE>(gcInfoToken.Info);
if (bPrintHeader)
{
gcDump.gcPrintf = gcPrintf;
@@ -1107,17 +1109,17 @@ LPCSTR AMD64Machine::s_SPName = "RSP";
///
/// Dump AMD64 GCInfo table
///
-void AMD64Machine::DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const
+void AMD64Machine::DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const
{
if (bPrintHeader)
{
ExtOut("Pointer table:\n");
}
- GCDump gcDump(encBytes, 5, true);
+ GCDump gcDump(gcInfoToken.Version, encBytes, 5, true);
gcDump.gcPrintf = gcPrintf;
- gcDump.DumpGCTable(pTable, methodSize, 0);
+ gcDump.DumpGCTable(dac_cast<PTR_BYTE>(gcInfoToken.Info), methodSize, 0);
}
#endif // SOS_TARGET_AMD64
diff --git a/src/ToolBox/SOS/Strike/disasm.h b/src/ToolBox/SOS/Strike/disasm.h
index 6972c39ccb..59fc168a6e 100644
--- a/src/ToolBox/SOS/Strike/disasm.h
+++ b/src/ToolBox/SOS/Strike/disasm.h
@@ -159,7 +159,7 @@ public:
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const
{ _ASSERTE(cntRegs != NULL); *regNames = s_GCRegs; *cntRegs = _countof(s_GCRegs); }
- virtual void DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const;
+ virtual void DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const;
private:
X86Machine() {}
@@ -225,7 +225,7 @@ public:
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const
{ _ASSERTE(cntRegs != NULL); *regNames = s_GCRegs; *cntRegs = _countof(s_GCRegs); }
- virtual void DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const;
+ virtual void DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const;
private:
ARMMachine() {}
@@ -293,7 +293,7 @@ public:
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const
{ _ASSERTE(cntRegs != NULL); *regNames = s_GCRegs; *cntRegs = _countof(s_GCRegs); }
- virtual void DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const;
+ virtual void DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const;
private:
AMD64Machine() {}
@@ -357,7 +357,7 @@ public:
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const
{ _ASSERTE(cntRegs != NULL); *regNames = s_GCRegs; *cntRegs = _countof(s_GCRegs);}
- virtual void DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const;
+ virtual void DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const;
private:
ARM64Machine() {}
diff --git a/src/ToolBox/SOS/Strike/disasmARM.cpp b/src/ToolBox/SOS/Strike/disasmARM.cpp
index 80dce71890..a82d4b9b65 100644
--- a/src/ToolBox/SOS/Strike/disasmARM.cpp
+++ b/src/ToolBox/SOS/Strike/disasmARM.cpp
@@ -607,7 +607,7 @@ BOOL ARMMachine::GetExceptionContext (TADDR stack, TADDR PC, TADDR *cxrAddr, CRO
///
/// Dump ARM GCInfo table
///
-void ARMMachine::DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const
+void ARMMachine::DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const
{
#ifndef FEATURE_PAL
if (bPrintHeader)
@@ -615,10 +615,10 @@ void ARMMachine::DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrint
ExtOut("Pointer table:\n");
}
- ARMGCDump::GCDump gcDump(encBytes, 5, true);
+ ARMGCDump::GCDump gcDump(gcInfoToken.Version, encBytes, 5, true);
gcDump.gcPrintf = gcPrintf;
- gcDump.DumpGCTable(pTable, methodSize, 0);
+ gcDump.DumpGCTable(dac_cast<PTR_BYTE>(gcInfoToken.Info), methodSize, 0);
#endif // !FEATURE_PAL
}
diff --git a/src/ToolBox/SOS/Strike/disasmARM64.cpp b/src/ToolBox/SOS/Strike/disasmARM64.cpp
index 2c581bc946..4ac8c59105 100644
--- a/src/ToolBox/SOS/Strike/disasmARM64.cpp
+++ b/src/ToolBox/SOS/Strike/disasmARM64.cpp
@@ -377,16 +377,16 @@ BOOL ARM64Machine::GetExceptionContext (TADDR stack, TADDR PC, TADDR *cxrAddr, C
///
/// Dump ARM GCInfo table
///
-void ARM64Machine::DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const
+void ARM64Machine::DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const
{
if (bPrintHeader)
{
ExtOut("Pointer table:\n");
}
- ARM64GCDump::GCDump gcDump(encBytes, 5, true);
+ ARM64GCDump::GCDump gcDump(gcInfoToken.Version, encBytes, 5, true);
gcDump.gcPrintf = gcPrintf;
- gcDump.DumpGCTable(pTable, methodSize, 0);
+ gcDump.DumpGCTable(dac_cast<PTR_BYTE>(gcInfoToken.Info), methodSize, 0);
}
diff --git a/src/ToolBox/SOS/Strike/exts.h b/src/ToolBox/SOS/Strike/exts.h
index baef6d7084..36b5230c37 100644
--- a/src/ToolBox/SOS/Strike/exts.h
+++ b/src/ToolBox/SOS/Strike/exts.h
@@ -23,7 +23,6 @@
#pragma warning(disable:4430) // missing type specifier: C++ doesn't support default-int
#endif
#include "strike.h"
-
#include <wdbgexts.h>
#include <dbgeng.h>
#include <stdio.h>
@@ -43,6 +42,8 @@
// the DAC to read the DAC-ized data structures.
#include "daccess.h"
+#include "gcinfo.h"
+
// Convert between CLRDATA_ADDRESS and TADDR.
#define TO_TADDR(cdaddr) ((TADDR)(cdaddr))
#define TO_CDADDR(taddr) ((CLRDATA_ADDRESS)(LONG_PTR)(taddr))
@@ -386,7 +387,7 @@ public:
typedef void (*printfFtn)(const char* fmt, ...);
// Dumps the GCInfo
- virtual void DumpGCInfo(BYTE* pTable, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const = 0;
+ virtual void DumpGCInfo(GCInfoToken gcInfoToken, unsigned methodSize, printfFtn gcPrintf, bool encBytes, bool bPrintHeader) const = 0;
protected:
IMachine() {}
diff --git a/src/ToolBox/SOS/Strike/strike.cpp b/src/ToolBox/SOS/Strike/strike.cpp
index 3b0086fdb7..df4a18443f 100644
--- a/src/ToolBox/SOS/Strike/strike.cpp
+++ b/src/ToolBox/SOS/Strike/strike.cpp
@@ -8017,10 +8017,10 @@ DECLARE_API(GCInfo)
// Mutable table pointer since we need to pass the appropriate
// offset into the table to DumpGCTable.
- BYTE *pTable = table;
+ GCInfoToken gcInfoToken = { table, GCINFO_VERSION };
unsigned int methodSize = (unsigned int)codeHeaderData.MethodSize;
- g_targetMachine->DumpGCInfo(pTable, methodSize, ExtOut, true /*encBytes*/, true /*bPrintHeader*/);
+ g_targetMachine->DumpGCInfo(gcInfoToken, methodSize, ExtOut, true /*encBytes*/, true /*bPrintHeader*/);
return Status;
}
@@ -8101,8 +8101,8 @@ void DecodeGCTableEntry (const char *fmt, ...)
VOID CALLBACK DumpGCTableFiberEntry (LPVOID pvGCEncodingInfo)
{
GCEncodingInfo *pInfo = (GCEncodingInfo*)pvGCEncodingInfo;
-
- g_targetMachine->DumpGCInfo(pInfo->table, pInfo->methodSize, DecodeGCTableEntry, false /*encBytes*/, false /*bPrintHeader*/);
+    GCInfoToken gcInfoToken = { pInfo->table, GCINFO_VERSION };
+ g_targetMachine->DumpGCInfo(gcInfoToken, pInfo->methodSize, DecodeGCTableEntry, false /*encBytes*/, false /*bPrintHeader*/);
pInfo->fDoneDecoding = true;
SwitchToFiber(pInfo->pvMainFiber);
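The strike.cpp and disasm hunks above replace the raw BYTE* plumbing with a GCInfoToken that pairs the encoded GC info blob with its format version. As a rough sketch, the call sites in this diff imply a shape like the following; the authoritative definition lives in the runtime's GC info headers (src/inc/gcinfotypes.h is touched by this commit), so treat this as illustration only:

    // Sketch inferred from the call sites above, not the authoritative definition.
    struct GCInfoToken
    {
        PTR_VOID Info;    // start of the encoded GC info blob
        UINT32   Version; // encoding version; GCINFO_VERSION for freshly jitted code
    };

    // SOS now stamps a token and hands it to the dumper instead of a bare pointer:
    GCInfoToken gcInfoToken = { table, GCINFO_VERSION };
    g_targetMachine->DumpGCInfo(gcInfoToken, methodSize, ExtOut,
                                true /*encBytes*/, true /*bPrintHeader*/);

Carrying the version next to the pointer is what lets one decoder handle blobs emitted by older native images without re-threading an extra parameter through every call site.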
diff --git a/src/debug/daccess/daccess.cpp b/src/debug/daccess/daccess.cpp
index 4f500d9e6a..ba3995b1f7 100644
--- a/src/debug/daccess/daccess.cpp
+++ b/src/debug/daccess/daccess.cpp
@@ -2339,7 +2339,7 @@ namespace serialization { namespace bin {
};
template <typename _Ty>
- class is_blittable<_Ty, typename std::enable_if<std::is_arithmetic<_Ty>::value>::type>
+ struct is_blittable<_Ty, typename std::enable_if<std::is_arithmetic<_Ty>::value>::type>
: std::true_type
{ // determines whether _Ty is blittable
};
@@ -2347,7 +2347,7 @@ namespace serialization { namespace bin {
// allow types to declare themselves blittable by including a static bool
// member "is_blittable".
template <typename _Ty>
- class is_blittable<_Ty, typename std::enable_if<_Ty::is_blittable>::type>
+ struct is_blittable<_Ty, typename std::enable_if<_Ty::is_blittable>::type>
: std::true_type
{ // determines whether _Ty is blittable
};
@@ -6012,7 +6012,7 @@ ClrDataAccess::GetMethodExtents(MethodDesc* methodDesc,
EECodeInfo codeInfo(methodStart);
_ASSERTE(codeInfo.IsValid());
- TADDR codeSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfo());
+ TADDR codeSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken());
*extents = new (nothrow) METH_EXTENTS;
if (!*extents)
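One subtlety in the is_blittable hunks above: the specializations derived from std::true_type with the class keyword while the primary template is a struct, and for a class, base-class inheritance defaults to private, which hides the inherited ::value member from callers. A minimal standalone illustration (names here are illustrative, not the DAC code):

    #include <type_traits>

    class  BadTrait  : std::true_type {};   // private inheritance: value is inaccessible
    struct GoodTrait : std::true_type {};   // public inheritance: value is usable

    static_assert(GoodTrait::value, "fine");
    // static_assert(BadTrait::value, "");  // error: 'value' is inaccessible

Switching the specializations to struct matches the primary template's class-key and keeps the trait queryable.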
diff --git a/src/debug/daccess/enummem.cpp b/src/debug/daccess/enummem.cpp
index f88fb628ba..068c2f2b13 100644
--- a/src/debug/daccess/enummem.cpp
+++ b/src/debug/daccess/enummem.cpp
@@ -979,10 +979,11 @@ HRESULT ClrDataAccess::EnumMemWalkStackHelper(CLRDataEnumMemoryFlags flags,
codeInfo.GetJitManager()->IsFilterFunclet(&codeInfo);
// The stackwalker needs GC info to find the parent 'stack pointer' or PSP
- PTR_BYTE pGCInfo = dac_cast<PTR_BYTE>(codeInfo.GetGCInfo());
+ GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
+ PTR_BYTE pGCInfo = dac_cast<PTR_BYTE>(gcInfoToken.Info);
if (pGCInfo != NULL)
{
- GcInfoDecoder gcDecoder(pGCInfo, DECODE_PSP_SYM, 0);
+ GcInfoDecoder gcDecoder(gcInfoToken, DECODE_PSP_SYM, 0);
DacEnumMemoryRegion(dac_cast<TADDR>(pGCInfo), gcDecoder.GetNumBytesRead(), true);
}
}
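The enummem.cpp change keeps minidump sizing correct for versioned blobs: the DAC decodes just enough of the GC info to find out how large it is, then captures exactly that many bytes. A condensed sketch of the pattern, assuming only the GcInfoDecoder API that appears in this diff:

    GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
    GcInfoDecoder gcDecoder(gcInfoToken, DECODE_PSP_SYM, 0);
    // GetNumBytesRead() reports how far the decoder advanced, which bounds the
    // region a debugger will need; enumerate that region into the dump.
    DacEnumMemoryRegion(dac_cast<TADDR>(gcInfoToken.Info), gcDecoder.GetNumBytesRead(), true);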
diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
index 6de9ec0b94..d151a54212 100644
--- a/src/debug/daccess/nidump.cpp
+++ b/src/debug/daccess/nidump.cpp
@@ -3093,7 +3093,8 @@ void NativeImageDumper::DumpCompleteMethod(PTR_Module module, MethodIterator& mi
unsigned gcInfoSize = UINT_MAX;
//parse GCInfo for size information.
- PTR_CBYTE gcInfo = dac_cast<PTR_CBYTE>(mi.GetGCInfo());
+ GCInfoToken gcInfoToken = mi.GetGCInfoToken();
+ PTR_CBYTE gcInfo = dac_cast<PTR_CBYTE>(gcInfoToken.Info);
void (* stringOutFn)(const char *, ...);
IF_OPT(GC_INFO)
@@ -3108,10 +3109,10 @@ void NativeImageDumper::DumpCompleteMethod(PTR_Module module, MethodIterator& mi
{
PTR_CBYTE curGCInfoPtr = gcInfo;
g_holdStringOutData.Clear();
- GCDump gcDump;
+ GCDump gcDump(gcInfoToken.Version);
gcDump.gcPrintf = stringOutFn;
#if !defined(_TARGET_X86_) && defined(USE_GC_INFO_DECODER)
- GcInfoDecoder gcInfoDecoder(curGCInfoPtr, DECODE_CODE_LENGTH, 0);
+ GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_CODE_LENGTH, 0);
methodSize = gcInfoDecoder.GetCodeLength();
#endif
@@ -3119,7 +3120,7 @@ void NativeImageDumper::DumpCompleteMethod(PTR_Module module, MethodIterator& mi
#ifdef _TARGET_X86_
InfoHdr hdr;
stringOutFn( "method info Block:\n" );
- curGCInfoPtr += gcDump.DumpInfoHdr(curGCInfoPtr, &hdr, &methodSize, 0);
+ curGCInfoPtr += gcDump.DumpInfoHdr(PTR_CBYTE(gcInfoToken.Info), &hdr, &methodSize, 0);
stringOutFn( "\n" );
#endif
@@ -9436,10 +9437,10 @@ void NativeImageDumper::DumpReadyToRunMethod(PCODE pEntryPoint, PTR_RUNTIME_FUNC
{
PTR_CBYTE curGCInfoPtr = gcInfo;
g_holdStringOutData.Clear();
- GCDump gcDump;
+ GCDump gcDump(GCINFO_VERSION);
gcDump.gcPrintf = stringOutFn;
#if !defined(_TARGET_X86_) && defined(USE_GC_INFO_DECODER)
- GcInfoDecoder gcInfoDecoder(curGCInfoPtr, DECODE_CODE_LENGTH, 0);
+ GcInfoDecoder gcInfoDecoder({ curGCInfoPtr, GCINFO_VERSION }, DECODE_CODE_LENGTH, 0);
methodSize = gcInfoDecoder.GetCodeLength();
#endif
diff --git a/src/debug/daccess/request.cpp b/src/debug/daccess/request.cpp
index 9e864769c4..62dd5f51f9 100644
--- a/src/debug/daccess/request.cpp
+++ b/src/debug/daccess/request.cpp
@@ -1148,7 +1148,7 @@ ClrDataAccess::GetCodeHeaderData(CLRDATA_ADDRESS ip, struct DacpCodeHeaderData *
codeHeaderData->MethodStart =
(CLRDATA_ADDRESS) codeInfo.GetStartAddress();
- size_t methodSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfo());
+ size_t methodSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken());
_ASSERTE(FitsIn<DWORD>(methodSize));
codeHeaderData->MethodSize = static_cast<DWORD>(methodSize);
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index ae4c55761d..a3792e1516 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -15342,21 +15342,6 @@ void gc_heap::gc1()
assert (ephemeral_high == heap_segment_reserved (ephemeral_heap_segment));
#endif //BACKGROUND_GC
- int bottom_gen = 0;
-#ifdef BACKGROUND_GC
- if (settings.concurrent)
- {
- bottom_gen = max_generation;
- }
-#endif //BACKGROUND_GC
- {
- for (int gen_number = bottom_gen; gen_number <= max_generation+1; gen_number++)
- {
- dynamic_data* dd = dynamic_data_of (gen_number);
- dd_new_allocation(dd) = dd_gc_new_allocation (dd);
- }
- }
-
if (fgn_maxgen_percent)
{
if (settings.condemned_generation == (max_generation - 1))
@@ -29848,6 +29833,7 @@ size_t gc_heap::compute_in (int gen_number)
}
dd_gc_new_allocation (dd) -= in;
+ dd_new_allocation (dd) = dd_gc_new_allocation (dd);
gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
gc_generation_data* gen_data = &(current_gc_data_per_heap->gen_data[gen_number]);
@@ -30020,6 +30006,8 @@ void gc_heap::compute_new_dynamic_data (int gen_number)
gen_data->npinned_surv = dd_survived_size (dd) - dd_pinned_survived_size (dd);
dd_gc_new_allocation (dd) = dd_desired_allocation (dd);
+ dd_new_allocation (dd) = dd_gc_new_allocation (dd);
+
//update counter
dd_promoted_size (dd) = out;
if (gen_number == max_generation)
@@ -30035,6 +30023,7 @@ void gc_heap::compute_new_dynamic_data (int gen_number)
dd_desired_allocation (dd) = desired_new_allocation (dd, out, max_generation+1, 0);
dd_gc_new_allocation (dd) = Align (dd_desired_allocation (dd),
get_alignment_constant (FALSE));
+ dd_new_allocation (dd) = dd_gc_new_allocation (dd);
gen_data = &(current_gc_data_per_heap->gen_data[max_generation+1]);
gen_data->size_after = total_gen_size;
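The gc.cpp hunks are a behavioral cleanup rather than part of the GCInfoToken work: instead of one catch-up loop at the end of gc1() copying dd_gc_new_allocation into dd_new_allocation for each generation, every site that recomputes the internal budget now republishes it on the spot. Schematically (shape of the change, not runnable runtime code):

    // Before: recompute in several places, reconcile once at the end of gc1().
    // After: each recompute site keeps the allocator-visible copy in sync:
    dd_gc_new_allocation (dd) -= in;                     // compute_in
    dd_new_allocation (dd) = dd_gc_new_allocation (dd);  // republish immediately

That closes the window in which the two counters could disagree and retires the bottom_gen special case that background GC needed under the old scheme.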
diff --git a/src/gcdump/gcdump.cpp b/src/gcdump/gcdump.cpp
index d2fda049fc..1c512c88e0 100644
--- a/src/gcdump/gcdump.cpp
+++ b/src/gcdump/gcdump.cpp
@@ -18,8 +18,9 @@
-GCDump::GCDump(bool encBytes, unsigned maxEncBytes, bool dumpCodeOffs)
- : fDumpEncBytes (encBytes ),
+GCDump::GCDump(UINT32 gcInfoVer, bool encBytes, unsigned maxEncBytes, bool dumpCodeOffs)
+ : gcInfoVersion (gcInfoVer),
+ fDumpEncBytes (encBytes ),
cMaxEncBytes (maxEncBytes ),
fDumpCodeOffsets(dumpCodeOffs)
{
@@ -32,7 +33,7 @@ GCDump::GCDump(bool encBytes, unsigned maxEncBytes, bool dumpCodeOffs)
* Display the byte encodings for the given range of the GC tables.
*/
-PTR_CBYTE GCDump::DumpEncoding(PTR_CBYTE table, int cDumpBytes)
+PTR_CBYTE GCDump::DumpEncoding(PTR_CBYTE gcInfoBlock, int cDumpBytes)
{
_ASSERTE((cDumpBytes >= 0) && (cMaxEncBytes < 256));
@@ -42,7 +43,7 @@ PTR_CBYTE GCDump::DumpEncoding(PTR_CBYTE table, int cDumpBytes)
unsigned count;
int cBytesLeft;
- for (count = cMaxEncBytes, cBytesLeft = cDumpBytes, pCurPos = table;
+ for (count = cMaxEncBytes, cBytesLeft = cDumpBytes, pCurPos = gcInfoBlock;
count > 0;
count--, pCurPos++, cBytesLeft--)
{
@@ -60,7 +61,7 @@ PTR_CBYTE GCDump::DumpEncoding(PTR_CBYTE table, int cDumpBytes)
gcPrintf("| ");
}
- return table + cDumpBytes;
+ return gcInfoBlock + cDumpBytes;
}
/*****************************************************************************/
diff --git a/src/gcdump/gcdumpnonx86.cpp b/src/gcdump/gcdumpnonx86.cpp
index 8167d3abd8..53e16ffbff 100644
--- a/src/gcdump/gcdumpnonx86.cpp
+++ b/src/gcdump/gcdumpnonx86.cpp
@@ -78,8 +78,9 @@ PCSTR GetRegName (UINT32 regnum)
/*****************************************************************************/
-GCDump::GCDump(bool encBytes, unsigned maxEncBytes, bool dumpCodeOffs)
- : fDumpEncBytes (encBytes ),
+GCDump::GCDump(UINT32 gcInfoVer, bool encBytes, unsigned maxEncBytes, bool dumpCodeOffs)
+ : gcInfoVersion(gcInfoVer),
+ fDumpEncBytes (encBytes ),
cMaxEncBytes (maxEncBytes ),
fDumpCodeOffsets(dumpCodeOffs)
{
@@ -270,11 +271,12 @@ BOOL StackSlotStateChangeCallback (
}
-size_t GCDump::DumpGCTable(PTR_CBYTE table,
+size_t GCDump::DumpGCTable(PTR_CBYTE gcInfoBlock,
unsigned methodSize,
bool verifyGCTables)
{
- GcInfoDecoder hdrdecoder(table,
+ GCInfoToken gcInfoToken = { dac_cast<PTR_VOID>(gcInfoBlock), gcInfoVersion };
+ GcInfoDecoder hdrdecoder(gcInfoToken,
(GcInfoDecoderFlags)( DECODE_SECURITY_OBJECT
| DECODE_GS_COOKIE
| DECODE_CODE_LENGTH
@@ -439,7 +441,7 @@ size_t GCDump::DumpGCTable(PTR_CBYTE table,
UINT32 cbEncodedMethodSize = hdrdecoder.GetCodeLength();
gcPrintf("Code size: %x\n", cbEncodedMethodSize);
- GcInfoDumper dumper(table);
+ GcInfoDumper dumper(gcInfoToken);
GcInfoDumpState state;
state.LastCodeOffset = -1;
@@ -520,6 +522,3 @@ void GCDump::DumpPtrsInFrame(PTR_CBYTE infoBlock,
#define VALIDATE_ROOT(isInterior, hCallBack, pObjRef) ((void)0)
#include "../vm/gcinfodecoder.cpp"
#include "../gcinfo/gcinfodumper.cpp"
-#ifdef VERIFY_GCINFO
-#include "../vm/dbggcinfodecoder.cpp"
-#endif
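On the dump side, GCDump now stores the version it was constructed with and stamps it into a token before decoding. A condensed sketch of the new DumpGCTable path, with the decoder flag set trimmed to the one flag this snippet needs (the full set appears in the hunk above):

    GCInfoToken gcInfoToken = { dac_cast<PTR_VOID>(gcInfoBlock), gcInfoVersion };
    GcInfoDecoder hdrdecoder(gcInfoToken, (GcInfoDecoderFlags)DECODE_CODE_LENGTH, 0);
    UINT32 cbEncodedMethodSize = hdrdecoder.GetCodeLength();
    GcInfoDumper dumper(gcInfoToken);   // the dumper likewise takes the token now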
diff --git a/src/gcinfo/CMakeLists.txt b/src/gcinfo/CMakeLists.txt
index 11857184e3..016e1e273f 100644
--- a/src/gcinfo/CMakeLists.txt
+++ b/src/gcinfo/CMakeLists.txt
@@ -3,7 +3,6 @@ set(CMAKE_INCLUDE_CURRENT_DIR ON)
set( GCINFO_SOURCES
arraylist.cpp
gcinfoencoder.cpp
- dbggcinfoencoder.cpp
)
if(CLR_CMAKE_PLATFORM_ARCH_I386)
diff --git a/src/gcinfo/dbggcinfoencoder.cpp b/src/gcinfo/dbggcinfoencoder.cpp
deleted file mode 100644
index 98480cf26d..0000000000
--- a/src/gcinfo/dbggcinfoencoder.cpp
+++ /dev/null
@@ -1,981 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-/*****************************************************************************
- *
- * GC Information Encoding API
- *
- * This is an older well-tested implementation
- * now used to verify the real encoding
- * Define VERIFY_GCINFO to enable the verification
- *
- */
-
-#ifdef VERIFY_GCINFO
-
-#include "dbggcinfoencoder.h"
-#include "gcinfoencoder.h"
-
-
-namespace DbgGcInfo {
-
-
-#ifdef _DEBUG
- #ifndef LOGGING
- #define LOGGING
- #endif
-#endif
-#include "log.h"
-
-
-void *GcInfoEncoder::LifetimeTransitionAllocator::Alloc (void *context, SIZE_T cb)
-{
- GcInfoEncoder *pGcInfoEncoder = CONTAINING_RECORD(context, GcInfoEncoder, m_LifetimeTransitions);
- return pGcInfoEncoder->m_pAllocator->Alloc(cb);
-}
-
-void GcInfoEncoder::LifetimeTransitionAllocator::Free (void *context, void *pv)
-{
-#ifdef MUST_CALL_JITALLOCATOR_FREE
- GcInfoEncoder *pGcInfoEncoder = CONTAINING_RECORD(context, GcInfoEncoder, m_LifetimeTransitions);
- pGcInfoEncoder->m_pAllocator->Free(pv);
-#endif
-}
-
-
-BitStreamWriter::MemoryBlockList::MemoryBlockList()
- : m_head(nullptr),
- m_tail(nullptr)
-{
-}
-
-BitStreamWriter::MemoryBlock* BitStreamWriter::MemoryBlockList::AppendNew(IJitAllocator* allocator, size_t bytes)
-{
- auto* memBlock = reinterpret_cast<MemoryBlock*>(allocator->Alloc(sizeof(MemoryBlock) + bytes));
- memBlock->m_next = nullptr;
-
- if (m_tail != nullptr)
- {
- _ASSERTE(m_head != nullptr);
- m_tail->m_next = memBlock;
- }
- else
- {
- _ASSERTE(m_head == nullptr);
- m_head = memBlock;
- }
-
- m_tail = memBlock;
- return memBlock;
-}
-
-void BitStreamWriter::MemoryBlockList::Dispose(IJitAllocator* allocator)
-{
-#ifdef MUST_CALL_JITALLOCATOR_FREE
- for (MemoryBlock* block = m_head, *next; block != nullptr; block = next)
- {
- next = block->m_next;
- allocator->Free(block);
- }
- m_head = nullptr;
- m_tail = nullptr;
-#endif
-}
-
-
-void BitStreamWriter::AllocMemoryBlock()
-{
- _ASSERTE( IS_ALIGNED( m_MemoryBlockSize, sizeof( size_t ) ) );
-    MemoryBlock* memBlock = m_MemoryBlocks.AppendNew(m_pAllocator, m_MemoryBlockSize);
-
-    m_pCurrentSlot = (size_t*) memBlock->Contents;
-    m_OutOfBlockSlot = m_pCurrentSlot + m_MemoryBlockSize / sizeof( size_t );
-
-#ifdef _DEBUG
-    m_MemoryBlocksCount++;
-#endif
-}
-
-
-GcInfoEncoder::GcInfoEncoder(
-            ICorJitInfo*            pCorJitInfo,
-            CORINFO_METHOD_INFO*    pMethodInfo,
-            IJitAllocator*          pJitAllocator
-            )
-    :   m_HeaderInfoWriter( pJitAllocator ),
-        m_FullyInterruptibleInfoWriter( pJitAllocator ),
-        m_LifetimeTransitions( pJitAllocator )
-{
- _ASSERTE( pCorJitInfo != NULL );
- _ASSERTE( pMethodInfo != NULL );
- _ASSERTE( pJitAllocator != NULL );
-
- m_pCorJitInfo = pCorJitInfo;
- m_pMethodInfo = pMethodInfo;
- m_pAllocator = pJitAllocator;
-
-#ifdef _DEBUG
- CORINFO_METHOD_HANDLE methodHandle = pMethodInfo->ftn;
-
- // Get the name of the current method along with the enclosing class
- // or module name.
- m_MethodName = (char *)
- pCorJitInfo->getMethodName(methodHandle, (const char **)&m_ModuleName);
-#endif
-
-
- m_MappingTableSize = m_MappingTableInitialSize;
- m_SlotMappings = (GcSlotDesc*) m_pAllocator->Alloc( m_MappingTableSize*sizeof(GcSlotDesc) );
- m_NumSlotMappings = 0;
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
- m_NumSafePointsWithGcState = 0;
-#endif
-#endif
-
- m_SecurityObjectStackSlot = NO_SECURITY_OBJECT;
- m_PSPSymStackSlot = NO_PSP_SYM;
- m_GenericsInstContextStackSlot = NO_GENERICS_INST_CONTEXT;
- m_StackBaseRegister = NO_STACK_BASE_REGISTER;
- m_SizeOfEditAndContinuePreservedArea = NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA;
- m_IsVarArg = false;
- m_LastInterruptibleRangeStopOffset = 0;
- m_NumInterruptibleRanges = 0;
-
-#ifdef _DEBUG
- m_IsMappingTableFrozen = FALSE;
- m_CodeLength = 0;
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- m_SizeOfStackOutgoingAndScratchArea = -1;
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-#endif //_DEBUG
-}
-
-GcSlotId GcInfoEncoder::GetRegisterSlotId( UINT32 regNum, GcSlotFlags flags )
-{
- // We could lookup an existing identical slot in the mapping table (via some hashtable mechanism).
- // We just create duplicates for now.
-
-#ifdef _DEBUG
- _ASSERTE( !m_IsMappingTableFrozen );
-#endif
-
- if( m_NumSlotMappings == m_MappingTableSize )
- {
- GrowMappingTable();
- }
- _ASSERTE( m_NumSlotMappings < m_MappingTableSize );
-
- m_SlotMappings[ m_NumSlotMappings ].IsRegister = 1;
- m_SlotMappings[ m_NumSlotMappings ].Slot.RegisterNumber = regNum;
- m_SlotMappings[ m_NumSlotMappings ].IsInterior = ( flags & GC_SLOT_INTERIOR ) ? 1 : 0;
- m_SlotMappings[ m_NumSlotMappings ].IsPinned = ( flags & GC_SLOT_PINNED ) ? 1 : 0;
-
- GcSlotId newSlotId;
- newSlotId = m_NumSlotMappings++;
- return newSlotId;
-}
-
-GcSlotId GcInfoEncoder::GetStackSlotId( INT32 spOffset, GcSlotFlags flags, GcStackSlotBase spBase )
-{
- // We could lookup an existing identical slot in the mapping table (via some hashtable mechanism).
- // We just create duplicates for now.
-
-#ifdef _DEBUG
- _ASSERTE( !m_IsMappingTableFrozen );
-#endif
-
- if( m_NumSlotMappings == m_MappingTableSize )
- {
- GrowMappingTable();
- }
- _ASSERTE( m_NumSlotMappings < m_MappingTableSize );
-
- // Not valid to reference anything below the current stack pointer
- _ASSERTE(GC_SP_REL != spBase || spOffset >= 0);
-
- m_SlotMappings[ m_NumSlotMappings ].IsRegister = 0;
- m_SlotMappings[ m_NumSlotMappings ].Slot.Stack.SpOffset = spOffset;
- m_SlotMappings[ m_NumSlotMappings ].Slot.Stack.Base = spBase;
- m_SlotMappings[ m_NumSlotMappings ].IsInterior = ( flags & GC_SLOT_INTERIOR ) ? 1 : 0;
- m_SlotMappings[ m_NumSlotMappings ].IsPinned = ( flags & GC_SLOT_PINNED ) ? 1 : 0;
-
- GcSlotId newSlotId;
- newSlotId = m_NumSlotMappings++;
- return newSlotId;
-}
-
-void GcInfoEncoder::GrowMappingTable()
-{
- m_MappingTableSize *= 2;
- GcSlotDesc* newMappingTable = (GcSlotDesc*) m_pAllocator->Alloc( m_MappingTableSize * sizeof(GcSlotDesc) );
- memcpy( newMappingTable, m_SlotMappings, m_NumSlotMappings * sizeof(GcSlotDesc) );
-
-#ifdef MUST_CALL_JITALLOCATOR_FREE
- m_pAllocator->Free( m_SlotMappings );
-#endif
-
- m_SlotMappings = newMappingTable;
-}
-
-GcSlotSet::GcSlotSet( GcInfoEncoder* pEncoder )
-{
-#ifdef _DEBUG
- _ASSERTE( pEncoder->m_IsMappingTableFrozen );
-#endif
-
- m_pEncoder = pEncoder;
- m_NumBytes = ( pEncoder->m_NumSlotMappings + 7 ) / 8;
- m_Data = (BYTE*) pEncoder->m_pAllocator->Alloc( m_NumBytes );
-}
-
-// Copy constructor
-GcSlotSet::GcSlotSet( GcSlotSet & other )
-{
- m_pEncoder = other.m_pEncoder;
- m_NumBytes = other.m_NumBytes;
- m_Data = (BYTE*) other.m_pEncoder->m_pAllocator->Alloc( m_NumBytes );
- memcpy( m_Data, other.m_Data, m_NumBytes);
-}
-
-void GcSlotSet::Add( GcSlotId slotId )
-{
- _ASSERTE( slotId < m_pEncoder->m_NumSlotMappings );
- m_Data[ slotId / 8 ] |= 1 << ( slotId % 8 );
-}
-
-void GcSlotSet::Remove( GcSlotId slotId )
-{
- _ASSERTE( slotId < m_pEncoder->m_NumSlotMappings );
- m_Data[ slotId / 8 ] &= ~( 1 << ( slotId % 8 ) );
-}
-
-// Not used
-#if 0
-
-void GcSlotSet::Add( GcSlotSet & other )
-{
- _ASSERTE( m_pEncoder == other.m_pEncoder );
-
- for( int i=0; i<m_NumBytes; i++ )
- {
- m_Data[ i ] |= other.m_Data[ i ];
- }
-}
-
-void GcSlotSet::Subtract( GcSlotSet & other )
-{
- _ASSERTE( m_pEncoder == other.m_pEncoder );
-
- for( int i=0; i<m_NumBytes; i++ )
- {
- m_Data[ i ] &= ~( other.m_Data[ i ] );
- }
-}
-
-void GcSlotSet::Intersect( GcSlotSet & other )
-{
- _ASSERTE( m_pEncoder == other.m_pEncoder );
-
- for( int i=0; i<m_NumBytes; i++ )
- {
- m_Data[ i ] &= other.m_Data[ i ];
- }
-}
-
-#endif // unused
-
-
-void GcInfoEncoder::FinalizeSlotIds()
-{
-#ifdef _DEBUG
- m_IsMappingTableFrozen = TRUE;
-#endif
-}
-
-
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-
-void GcInfoEncoder::DefineGcStateAtSafePoint(
- UINT32 instructionOffset,
- GcSlotSet &liveSlots
- )
-{
-#ifdef _DEBUG
- _ASSERTE( m_IsMappingTableFrozen );
-#endif
-
-#ifdef _DEBUG
- // Verify that any slot is not reported multiple times. This is O(n^2) but it executes only under _DEBUG
- for( INT32 i1=0; i1<((INT32)m_NumSlotMappings)-1; i1++ )
- {
- BYTE isLive1 = liveSlots.m_Data[ i1 / 8 ] & ( 1 << ( i1 % 8 ) );
- if( isLive1 )
- for( UINT32 i2=i1+1; i2<m_NumSlotMappings; i2++ )
- {
- BYTE isLive2 = liveSlots.m_Data[ i2 / 8 ] & ( 1 << ( i2 % 8 ) );
- if( isLive2 )
- {
- if( m_SlotMappings[ i1 ].IsRegister && m_SlotMappings[ i2 ].IsRegister )
- {
- _ASSERTE( m_SlotMappings[ i1 ].Slot.RegisterNumber != m_SlotMappings[ i2 ].Slot.RegisterNumber );
- }
- else if( !m_SlotMappings[ i1 ].IsRegister && !m_SlotMappings[ i2 ].IsRegister )
- {
- _ASSERTE( m_SlotMappings[ i1 ].Slot.SpOffset != m_SlotMappings[ i2 ].Slot.SpOffset );
- }
- }
- }
- }
-#endif
-
- m_PartiallyInterruptibleInfoWriter.Write( instructionOffset, 32 );
-
- UINT32 i;
- for( i=0; i<m_NumSlotMappings/8; i++ )
- m_PartiallyInterruptibleInfoWriter.Write( liveSlots.m_Data[ i ], 8 );
-
- if( m_NumSlotMappings % 8 > 0 )
- m_PartiallyInterruptibleInfoWriter.Write( liveSlots.m_Data[ i ], m_NumSlotMappings % 8 );
-
- m_NumSafePointsWithGcState++;
-}
-
-#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-#endif
-
-void GcInfoEncoder::DefineInterruptibleRange( UINT32 startInstructionOffset, UINT32 length )
-{
- UINT32 stopInstructionOffset = startInstructionOffset + length;
-
- size_t normStartDelta = NORMALIZE_CODE_OFFSET(startInstructionOffset) - NORMALIZE_CODE_OFFSET(m_LastInterruptibleRangeStopOffset);
- size_t normStopDelta = NORMALIZE_CODE_OFFSET(stopInstructionOffset) - NORMALIZE_CODE_OFFSET(startInstructionOffset);
- _ASSERTE(normStopDelta > 0);
-
- m_LastInterruptibleRangeStopOffset = startInstructionOffset + length;
-
- m_NumInterruptibleRanges++;
-
- m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(normStartDelta, INTERRUPTIBLE_RANGE_DELTA_ENCBASE);
-
- m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(normStopDelta-1, INTERRUPTIBLE_RANGE_DELTA_ENCBASE );
-}
-
-
-///////////////////////////////////////////////////////////////////////////
-// Tracking information
-///////////////////////////////////////////////////////////////////////////
-
-
-//
-// For inputs, pass zero as offset
-//
-
-void GcInfoEncoder::SetSlotState(
- UINT32 instructionOffset,
- GcSlotId slotId,
- GcSlotState slotState
- )
-{
- LifetimeTransition transition;
-
- transition.SlotDesc = m_SlotMappings[ slotId ];
- transition.CodeOffset = instructionOffset;
- transition.BecomesLive = ( slotState == GC_SLOT_LIVE );
-
- *( m_LifetimeTransitions.Append() ) = transition;
-}
-
-
-void GcInfoEncoder::SetIsVarArg()
-{
- m_IsVarArg = true;
-}
-
-void GcInfoEncoder::SetCodeLength( UINT32 length )
-{
- _ASSERTE( length > 0 );
- _ASSERTE( m_CodeLength == 0 || m_CodeLength == length );
- m_CodeLength = length;
-}
-
-
-void GcInfoEncoder::SetSecurityObjectStackSlot( INT32 spOffset )
-{
- _ASSERTE( spOffset != NO_SECURITY_OBJECT );
- _ASSERTE( m_SecurityObjectStackSlot == NO_SECURITY_OBJECT || m_SecurityObjectStackSlot == spOffset );
- m_SecurityObjectStackSlot = spOffset;
-}
-
-void GcInfoEncoder::SetPSPSymStackSlot( INT32 spOffsetPSPSym )
-{
- _ASSERTE( spOffsetPSPSym != NO_PSP_SYM );
- _ASSERTE( m_PSPSymStackSlot == NO_PSP_SYM || m_PSPSymStackSlot == spOffsetPSPSym );
-
- m_PSPSymStackSlot = spOffsetPSPSym;
-}
-
-void GcInfoEncoder::SetGenericsInstContextStackSlot( INT32 spOffsetGenericsContext )
-{
- _ASSERTE( spOffsetGenericsContext != NO_GENERICS_INST_CONTEXT);
- _ASSERTE( m_GenericsInstContextStackSlot == NO_GENERICS_INST_CONTEXT || m_GenericsInstContextStackSlot == spOffsetGenericsContext );
-
- m_GenericsInstContextStackSlot = spOffsetGenericsContext;
-}
-
-void GcInfoEncoder::SetStackBaseRegister( UINT32 regNum )
-{
- _ASSERTE( regNum != NO_STACK_BASE_REGISTER );
- _ASSERTE( m_StackBaseRegister == NO_STACK_BASE_REGISTER || m_StackBaseRegister == regNum );
- m_StackBaseRegister = regNum;
-}
-
-void GcInfoEncoder::SetSizeOfEditAndContinuePreservedArea( UINT32 slots )
-{
-    _ASSERTE( slots != NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA );
- _ASSERTE( m_SizeOfEditAndContinuePreservedArea == NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA );
- m_SizeOfEditAndContinuePreservedArea = slots;
-}
-
-
-
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
-void GcInfoEncoder::SetSizeOfStackOutgoingAndScratchArea( UINT32 size )
-{
- _ASSERTE( size != -1 );
- _ASSERTE( m_SizeOfStackOutgoingAndScratchArea == -1 || m_SizeOfStackOutgoingAndScratchArea == size );
- m_SizeOfStackOutgoingAndScratchArea = size;
-}
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-
-
-int __cdecl CompareLifetimeTransitionsForQsort(const void* p1, const void* p2)
-{
- const GcInfoEncoder::LifetimeTransition* pFirst = (const GcInfoEncoder::LifetimeTransition*) p1;
- const GcInfoEncoder::LifetimeTransition* pSecond = (const GcInfoEncoder::LifetimeTransition*) p2;
-
- // All registers come before all stack slots
- if( pFirst->SlotDesc.IsRegister && !pSecond->SlotDesc.IsRegister ) return -1;
- if( !pFirst->SlotDesc.IsRegister && pSecond->SlotDesc.IsRegister ) return 1;
-
- // Then sort them by slot
- if( pFirst->SlotDesc.IsRegister )
- {
- _ASSERTE( pSecond->SlotDesc.IsRegister );
- if( pFirst->SlotDesc.Slot.RegisterNumber < pSecond->SlotDesc.Slot.RegisterNumber ) return -1;
- if( pFirst->SlotDesc.Slot.RegisterNumber > pSecond->SlotDesc.Slot.RegisterNumber ) return 1;
- }
- else
- {
- _ASSERTE( !pSecond->SlotDesc.IsRegister );
- if( pFirst->SlotDesc.Slot.Stack.SpOffset < pSecond->SlotDesc.Slot.Stack.SpOffset ) return -1;
- if( pFirst->SlotDesc.Slot.Stack.SpOffset > pSecond->SlotDesc.Slot.Stack.SpOffset ) return 1;
-
- // This is arbitrary, but we want to make sure they are considered separate slots
- if( pFirst->SlotDesc.Slot.Stack.Base < pSecond->SlotDesc.Slot.Stack.Base ) return -1;
- if( pFirst->SlotDesc.Slot.Stack.Base > pSecond->SlotDesc.Slot.Stack.Base ) return 1;
- }
-
- // Then sort them by code offset
- size_t firstOffset = pFirst->CodeOffset;
- size_t secondOffset = pSecond->CodeOffset;
- if( firstOffset < secondOffset ) return -1;
- if( firstOffset > secondOffset ) return 1;
-
- //
- // Same slot and offset. We put all the going-live transition first
- // so that the encoder will skip the remaining transitions and
- // the going-live transitions take precedence
- //
- _ASSERTE( ( pFirst->BecomesLive == 0 ) || ( pFirst->BecomesLive == 1 ) );
- _ASSERTE( ( pSecond->BecomesLive == 0 ) || ( pSecond->BecomesLive == 1 ) );
- return ( pSecond->BecomesLive - pFirst->BecomesLive );
-}
-
-
-void GcInfoEncoder::Build()
-{
- SIZE_T i;
-
- ///////////////////////////////////////////////////////////////////////
- // Method header
- ///////////////////////////////////////////////////////////////////////
-
- m_HeaderInfoWriter.Write( ( m_IsVarArg ? 1 : 0 ), 1 );
-
- if(m_SecurityObjectStackSlot != NO_SECURITY_OBJECT)
- {
- m_HeaderInfoWriter.Write( 1, 1 );
- m_HeaderInfoWriter.EncodeVarLengthSigned(NORMALIZE_STACK_SLOT(m_SecurityObjectStackSlot), SECURITY_OBJECT_STACK_SLOT_ENCBASE);
- }
- else
- {
- m_HeaderInfoWriter.Write( 0, 1 );
- }
-
- if (m_PSPSymStackSlot != NO_PSP_SYM)
- {
- m_HeaderInfoWriter.Write( 1, 1 );
- m_HeaderInfoWriter.EncodeVarLengthSigned(NORMALIZE_STACK_SLOT(m_PSPSymStackSlot), PSP_SYM_STACK_SLOT_ENCBASE);
- }
- else
- {
- m_HeaderInfoWriter.Write( 0, 1 );
- }
-
- if (m_GenericsInstContextStackSlot != NO_GENERICS_INST_CONTEXT)
- {
- m_HeaderInfoWriter.Write( 1, 1 );
- m_HeaderInfoWriter.EncodeVarLengthSigned(NORMALIZE_STACK_SLOT(m_GenericsInstContextStackSlot), GENERICS_INST_CONTEXT_STACK_SLOT_ENCBASE);
- }
- else
- {
- m_HeaderInfoWriter.Write( 0, 1 );
- }
-
- _ASSERTE( m_CodeLength > 0 );
- m_HeaderInfoWriter.EncodeVarLengthUnsigned(NORMALIZE_CODE_LENGTH(m_CodeLength), CODE_LENGTH_ENCBASE);
-
- if(m_StackBaseRegister != NO_STACK_BASE_REGISTER)
- {
- m_HeaderInfoWriter.Write( 1, 1 );
- m_HeaderInfoWriter.EncodeVarLengthUnsigned(NORMALIZE_STACK_BASE_REGISTER(m_StackBaseRegister), STACK_BASE_REGISTER_ENCBASE);
- }
- else
- {
- m_HeaderInfoWriter.Write( 0, 1 );
- }
-
- if(m_SizeOfEditAndContinuePreservedArea != NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA)
- {
- m_HeaderInfoWriter.Write( 1, 1 );
- m_HeaderInfoWriter.EncodeVarLengthUnsigned(m_SizeOfEditAndContinuePreservedArea, SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA_ENCBASE);
- }
- else
- {
- m_HeaderInfoWriter.Write( 0, 1 );
- }
-
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- _ASSERTE( m_SizeOfStackOutgoingAndScratchArea != -1 );
- m_HeaderInfoWriter.EncodeVarLengthUnsigned(NORMALIZE_SIZE_OF_STACK_AREA(m_SizeOfStackOutgoingAndScratchArea), SIZE_OF_STACK_AREA_ENCBASE);
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-
-
- ///////////////////////////////////////////////////////////////////////
- // Fully-interruptible: encode number of interruptible ranges
- ///////////////////////////////////////////////////////////////////////
-
- m_HeaderInfoWriter.EncodeVarLengthUnsigned(NORMALIZE_NUM_INTERRUPTIBLE_RANGES(m_NumInterruptibleRanges), NUM_INTERRUPTIBLE_RANGES_ENCBASE);
-
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-
- ///////////////////////////////////////////////////////////////////////
- // Partially-interruptible: Encode call sites
- ///////////////////////////////////////////////////////////////////////
-
- m_HeaderInfoWriter.Write( m_NumSafePointsWithGcState, sizeof( m_NumSafePointsWithGcState ) * 8 );
-
- if( m_NumSafePointsWithGcState > 0 )
- {
- m_HeaderInfoWriter.Write( m_NumSlotMappings, sizeof( m_NumSlotMappings ) * 8 );
-
- ///////////////////////////////////////////////////////////////////////
- // Partially-interruptible: Encode slot mappings
- ///////////////////////////////////////////////////////////////////////
-
- // Assert that we can write a GcSlotDesc with a single call to BitStreamWriter.Write()
- _ASSERTE( sizeof( GcSlotDesc ) <= sizeof( size_t ) );
- for( UINT32 i=0; i<m_NumSlotMappings; i++ )
- {
- size_t data = 0;
- *( (GcSlotDesc*) &data ) = m_SlotMappings[ i ];
- m_PartiallyInterruptibleInfoWriter.Write( data, sizeof( GcSlotDesc ) * 8 );
- }
- }
-
-#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-#endif
-
- ///////////////////////////////////////////////////////////////////////
- // Fully-interruptible: Encode lifetime transitions
- ///////////////////////////////////////////////////////////////////////
-
- m_rgSortedTransitions = (LifetimeTransition*)m_pAllocator->Alloc(m_LifetimeTransitions.Count() * sizeof(LifetimeTransition));
- m_LifetimeTransitions.CopyTo(m_rgSortedTransitions);
-
- // Sort them first
- size_t numTransitions = m_LifetimeTransitions.Count();
- qsort(m_rgSortedTransitions, numTransitions, sizeof(LifetimeTransition), CompareLifetimeTransitionsForQsort);
-
- //------------------------------------------------------------------
- // Count registers and stack slots
- //------------------------------------------------------------------
-
- int numRegisters = 0;
- int numStackSlots = 0;
-
- if(numTransitions > 0)
- {
- i = 1;
- if(m_rgSortedTransitions[ 0 ].SlotDesc.IsRegister)
- {
- numRegisters++;
-
- for( ; i < numTransitions; i++ )
- {
- if(!(m_rgSortedTransitions[ i ].SlotDesc.IsRegister))
- {
- numStackSlots++;
- i++;
- break;
- }
- _ASSERTE(m_rgSortedTransitions[ i-1 ].SlotDesc.IsRegister);
- if((m_rgSortedTransitions[ i ].SlotDesc.Slot.RegisterNumber) != (m_rgSortedTransitions[ i-1 ].SlotDesc.Slot.RegisterNumber))
- numRegisters++;
- }
- }
- else
- {
- numStackSlots++;
- }
-
- for( ; i < numTransitions; i++ )
- {
- _ASSERTE(!(m_rgSortedTransitions[ i-1 ].SlotDesc.IsRegister));
- if((m_rgSortedTransitions[ i ].SlotDesc.Slot.Stack) != (m_rgSortedTransitions[ i-1 ].SlotDesc.Slot.Stack))
- numStackSlots++;
- }
- }
-
-
- size_t __registerSize = 0;
- size_t __stackSlotSize = 0;
- size_t __transitionSize = 0;
- size_t __numTransitions = 0;
-
-
- //------------------------------------------------------------------
- // Encode registers
- //------------------------------------------------------------------
-
- i = 0;
-
- m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(numRegisters, NUM_REGISTERS_ENCBASE);
-
- UINT32 lastNormRegNum = 0;
-
- for( int j=0; j < numRegisters; j++ )
- {
- _ASSERTE(m_rgSortedTransitions[ i ].SlotDesc.IsRegister);
-
- UINT32 currentRegNum = m_rgSortedTransitions[ i ].SlotDesc.Slot.RegisterNumber;
-
- // Encode slot identification
- UINT32 currentNormRegNum = NORMALIZE_REGISTER(currentRegNum);
- if( j == 0 )
- __registerSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(currentNormRegNum, REGISTER_ENCBASE);
- else
- __registerSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(currentNormRegNum - lastNormRegNum - 1, REGISTER_DELTA_ENCBASE);
- lastNormRegNum = currentNormRegNum;
-
- LifetimeTransition* pLastEncodedTransition = NULL;
-
- for( ; i < numTransitions; i++)
- {
- LifetimeTransition* pTransition = &(m_rgSortedTransitions[ i ]);
-
- if( !(pTransition->SlotDesc.IsRegister) || (pTransition->SlotDesc.Slot.RegisterNumber != currentRegNum))
- break;
-
- if( (pLastEncodedTransition == NULL) )
- {
- // Skip initial going-dead transitions (if any)
- if(!pTransition->BecomesLive)
- continue;
-
- // Encode first going-live transition
- size_t normCodeOffset = NORMALIZE_CODE_OFFSET(pTransition->CodeOffset)+1; // Leave 0 available as terminator
- __transitionSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(normCodeOffset, NORM_CODE_OFFSET_DELTA_ENCBASE);
-
- __transitionSize += EncodeFullyInterruptibleSlotFlags(pTransition->SlotDesc);
-
- __numTransitions++;
- }
- else
- {
- _ASSERTE(pLastEncodedTransition->SlotDesc.IsRegister && pLastEncodedTransition->SlotDesc.Slot.RegisterNumber == currentRegNum);
-
- // Skip transitions on identical offsets
- // If there are multiple transitions on the same code offset, we'll encode the first one only
- _ASSERTE(i > 0);
- LifetimeTransition* pPrevTransition = &(m_rgSortedTransitions[ i-1 ]);
- if( (pPrevTransition->CodeOffset == pTransition->CodeOffset) )
- {
- _ASSERTE((!pPrevTransition->BecomesLive || !pTransition->BecomesLive) ||
- (pPrevTransition->SlotDesc.IsInterior == pTransition->SlotDesc.IsInterior) &&
- (pPrevTransition->SlotDesc.IsPinned == pTransition->SlotDesc.IsPinned));
- continue;
- }
-
- // Also skip redundant transitions
- if( (pLastEncodedTransition->BecomesLive == pTransition->BecomesLive) &&
- (pLastEncodedTransition->SlotDesc.IsInterior == pTransition->SlotDesc.IsInterior) &&
- (pLastEncodedTransition->SlotDesc.IsPinned == pTransition->SlotDesc.IsPinned) )
- continue;
-
- // Encode transition
- size_t normCodeOffsetDelta = NORMALIZE_CODE_OFFSET(pTransition->CodeOffset) - NORMALIZE_CODE_OFFSET(pLastEncodedTransition->CodeOffset);
- _ASSERTE(normCodeOffsetDelta != 0); // Leave 0 available as terminator
- __transitionSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(normCodeOffsetDelta, NORM_CODE_OFFSET_DELTA_ENCBASE);
-
- if(pTransition->BecomesLive)
- {
- m_FullyInterruptibleInfoWriter.Write(1, 1);
- __transitionSize += EncodeFullyInterruptibleSlotFlags(pTransition->SlotDesc) + 1;
- }
- else
- {
- m_FullyInterruptibleInfoWriter.Write(0, 1);
- __transitionSize++;
- }
-
- __numTransitions++;
- }
-
- pLastEncodedTransition = pTransition;
- }
-
- // Encode termination for this slot
- m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(0, NORM_CODE_OFFSET_DELTA_ENCBASE);
- }
-
-
- //------------------------------------------------------------------
- // Encode stack slots
- //------------------------------------------------------------------
-
- m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(numStackSlots, NUM_STACK_SLOTS_ENCBASE);
-
- INT32 lastNormStackSlot = 0;
-
- for( int j=0; j < numStackSlots; j++ )
- {
- _ASSERTE(!m_rgSortedTransitions[ i ].SlotDesc.IsRegister);
-
- GcStackSlot currentStackSlot = m_rgSortedTransitions[ i ].SlotDesc.Slot.Stack;
-
- // Encode slot identification
- INT32 currentNormStackSlot = NORMALIZE_STACK_SLOT(currentStackSlot.SpOffset);
- if( j == 0 )
- __stackSlotSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthSigned(currentNormStackSlot, STACK_SLOT_ENCBASE);
- else
- __stackSlotSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(currentNormStackSlot - lastNormStackSlot, STACK_SLOT_DELTA_ENCBASE);
- lastNormStackSlot = currentNormStackSlot;
- _ASSERTE((currentStackSlot.Base & ~3) == 0);
- m_FullyInterruptibleInfoWriter.Write(currentStackSlot.Base, 2);
- __stackSlotSize += 2;
-
- LifetimeTransition* pLastEncodedTransition = NULL;
-
- for( ; i < numTransitions; i++)
- {
- LifetimeTransition* pTransition = &(m_rgSortedTransitions[ i ]);
-
- _ASSERTE(!pTransition->SlotDesc.IsRegister);
-
- if(pTransition->SlotDesc.Slot.Stack != currentStackSlot)
- break;
-
- if( (pLastEncodedTransition == NULL) )
- {
- // Skip initial going-dead transitions (if any)
- if(!pTransition->BecomesLive)
- continue;
-
- // Encode first going-live transition
- size_t normCodeOffset = NORMALIZE_CODE_OFFSET(pTransition->CodeOffset)+1; // Leave 0 available as terminator
- __transitionSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(normCodeOffset, NORM_CODE_OFFSET_DELTA_ENCBASE);
-
- __transitionSize += EncodeFullyInterruptibleSlotFlags(pTransition->SlotDesc);
-
- __numTransitions++;
- }
- else
- {
- _ASSERTE(!(pLastEncodedTransition->SlotDesc.IsRegister) && pLastEncodedTransition->SlotDesc.Slot.Stack == currentStackSlot);
-
- // Skip transitions on identical offsets
- // If there are multiple transitions on the same code offset, we'll encode the first one only
- _ASSERTE(i > 0);
- LifetimeTransition* pPrevTransition = &(m_rgSortedTransitions[ i-1 ]);
- if( (pPrevTransition->CodeOffset == pTransition->CodeOffset) )
- {
- _ASSERTE((!pPrevTransition->BecomesLive || !pTransition->BecomesLive) ||
- (pPrevTransition->SlotDesc.IsInterior == pTransition->SlotDesc.IsInterior) &&
- (pPrevTransition->SlotDesc.IsPinned == pTransition->SlotDesc.IsPinned));
- continue;
- }
-
- // Also skip redundant transitions
- if( (pLastEncodedTransition->BecomesLive == pTransition->BecomesLive) &&
- (pLastEncodedTransition->SlotDesc.IsInterior == pTransition->SlotDesc.IsInterior) &&
- (pLastEncodedTransition->SlotDesc.IsPinned == pTransition->SlotDesc.IsPinned) )
- continue;
-
- // Encode transition
- size_t normCodeOffsetDelta = NORMALIZE_CODE_OFFSET(pTransition->CodeOffset) - NORMALIZE_CODE_OFFSET(pLastEncodedTransition->CodeOffset);
- _ASSERTE(normCodeOffsetDelta != 0); // Leave 0 available as terminator
- __transitionSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(normCodeOffsetDelta, NORM_CODE_OFFSET_DELTA_ENCBASE);
-
- if(pTransition->BecomesLive)
- {
- m_FullyInterruptibleInfoWriter.Write(1, 1);
- __transitionSize += EncodeFullyInterruptibleSlotFlags(pTransition->SlotDesc) + 1;
- }
- else
- {
- m_FullyInterruptibleInfoWriter.Write(0, 1);
- __transitionSize++;
- }
-
- __numTransitions++;
- }
-
- pLastEncodedTransition = pTransition;
- }
-
- // Encode termination for this slot
- __transitionSize += m_FullyInterruptibleInfoWriter.EncodeVarLengthUnsigned(0, NORM_CODE_OFFSET_DELTA_ENCBASE);
- }
-
-}
-
-size_t GcInfoEncoder::GetByteCount()
-{
- return m_HeaderInfoWriter.GetByteCount() +
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
- m_PartiallyInterruptibleInfoWriter.GetByteCount() +
-#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-#endif
- m_FullyInterruptibleInfoWriter.GetByteCount();
-}
-
-//
-// Write encoded information to its final destination and frees temporary buffers.
-// The encoder shouldn't be used anymore after calling this method.
-//
-BYTE* GcInfoEncoder::Emit(BYTE* destBuffer)
-{
- size_t cbGcInfoSize = GetByteCount();
-
- _ASSERTE( destBuffer );
-
- m_HeaderInfoWriter.CopyTo( destBuffer );
- destBuffer += m_HeaderInfoWriter.GetByteCount();
- m_HeaderInfoWriter.Dispose();
-
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
- m_PartiallyInterruptibleInfoWriter.CopyTo( destBuffer );
- destBuffer += m_PartiallyInterruptibleInfoWriter.GetByteCount();
- m_PartiallyInterruptibleInfoWriter.Dispose();
-#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-#endif
-
- m_FullyInterruptibleInfoWriter.CopyTo( destBuffer );
- m_FullyInterruptibleInfoWriter.Dispose();
-
- return destBuffer;
-}
-
-void * GcInfoEncoder::eeAllocGCInfo (size_t blockSize)
-{
- return m_pCorJitInfo->allocGCInfo((ULONG)blockSize);
-}
-
-
-BitStreamWriter::BitStreamWriter( IJitAllocator* pAllocator )
-{
- m_pAllocator = pAllocator;
- m_BitCount = 0;
-#ifdef _DEBUG
- m_MemoryBlocksCount = 0;
-#endif
-
- // We are going to need at least one memory block, so we pre-allocate it
- AllocMemoryBlock();
- InitCurrentSlot();
-}
-
-//
-// bit 0 is the least significant bit
-// The stream stores the first-written bit in the least significant bit of each byte
-//
-void BitStreamWriter::Write( size_t data, int count )
-{
- _ASSERTE( count > 0 );
- _ASSERT( count <= sizeof( size_t )*8 );
-
- // Increment it now as we change count later on
- m_BitCount += count;
-
- if( count > m_FreeBitsInCurrentSlot )
- {
- if( m_FreeBitsInCurrentSlot > 0 )
- {
- WriteInCurrentSlot( data, m_FreeBitsInCurrentSlot );
- count -= m_FreeBitsInCurrentSlot;
- data >>= m_FreeBitsInCurrentSlot;
- }
-
- _ASSERTE( count > 0 );
-
- // Initialize the next slot
- if( ++m_pCurrentSlot >= m_OutOfBlockSlot )
- {
- // Get a new memory block
- AllocMemoryBlock();
- }
-
- InitCurrentSlot();
-
- // Write the remainder
- WriteInCurrentSlot( data, count );
- m_FreeBitsInCurrentSlot -= count;
- }
- else
- {
- WriteInCurrentSlot( data, count );
- m_FreeBitsInCurrentSlot -= count;
-        // if m_FreeBitsInCurrentSlot becomes 0, a new slot will be initialized on the next request
- }
-}
-
-
-void BitStreamWriter::CopyTo( BYTE* buffer )
-{
- int i,c;
- BYTE* source = NULL;
-
- MemoryBlock* pMemBlock = m_MemoryBlocks.Head();
-    _ASSERTE( pMemBlock != NULL );
- while (pMemBlock->Next() != NULL)
- {
- source = (BYTE*) pMemBlock->Contents;
- // @TODO: use memcpy instead
- for( i = 0; i < m_MemoryBlockSize; i++ )
- {
- *( buffer++ ) = *( source++ );
- }
-
- pMemBlock = pMemBlock->Next();
- }
-
- source = (BYTE*) pMemBlock->Contents;
- // The number of bytes to copy in the last block
- c = (int) ((BYTE*) ( m_pCurrentSlot + 1 ) - source - m_FreeBitsInCurrentSlot/8);
- _ASSERTE( c >= 0 );
- // @TODO: use memcpy instead
- for( i = 0; i < c; i++ )
- {
- *( buffer++ ) = *( source++ );
- }
-
-}
-
-void BitStreamWriter::Dispose()
-{
- m_MemoryBlocks.Dispose(m_pAllocator);
-}
-
-}
-
-#endif // VERIFY_GCINFO
-
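The BitStreamWriter removed above packs bits LSB-first into pointer-sized slots chained
across allocator-backed memory blocks. A minimal stand-alone sketch of the same packing
discipline, using a plain byte vector instead of the encoder's block list (all names here
are illustrative, not the encoder's API):

    // LSB-first bit packer: the first-written bit lands in the least
    // significant bit of each byte, matching BitStreamWriter::Write.
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct TinyBitWriter {
        std::vector<uint8_t> bytes;
        size_t bitCount = 0;

        void Write(size_t data, int count) {
            assert(count > 0 && count <= (int)(sizeof(size_t) * 8));
            for (int i = 0; i < count; i++) {
                if (bitCount % 8 == 0)
                    bytes.push_back(0);
                bytes.back() |= (uint8_t)(((data >> i) & 1) << (bitCount % 8));
                bitCount++;
            }
        }
    };

For example, Write(0x5, 3) followed by Write(0x1, 1) leaves the first byte as 0b1101.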
diff --git a/src/gcinfo/gcinfo.settings.targets b/src/gcinfo/gcinfo.settings.targets
index f600e2a904..5c241b353a 100644
--- a/src/gcinfo/gcinfo.settings.targets
+++ b/src/gcinfo/gcinfo.settings.targets
@@ -14,7 +14,6 @@
<ItemGroup>
<CppCompile Include="..\ArrayList.cpp" />
<CppCompile Include="..\GCInfoEncoder.cpp" />
- <CppCompile Include="..\DbgGCInfoEncoder.cpp" />
</ItemGroup>
<!-- Import the targets -->
diff --git a/src/gcinfo/gcinfodumper.cpp b/src/gcinfo/gcinfodumper.cpp
index 432e7066ce..4e31871f67 100644
--- a/src/gcinfo/gcinfodumper.cpp
+++ b/src/gcinfo/gcinfodumper.cpp
@@ -21,9 +21,9 @@
#error pick suitable ADDRESS_SPACING for platform
#endif
-GcInfoDumper::GcInfoDumper (PTR_CBYTE pbGCInfo)
+GcInfoDumper::GcInfoDumper (GCInfoToken gcInfoToken)
{
- m_pbGCInfo = pbGCInfo;
+ m_gcTable = gcInfoToken;
m_pRecords = NULL;
m_gcInfoSize = 0;
}
@@ -492,7 +492,7 @@ GcInfoDumper::EnumerateStateChangesResults GcInfoDumper::EnumerateStateChanges (
//
// Decode header information
//
- GcInfoDecoder hdrdecoder(m_pbGCInfo,
+ GcInfoDecoder hdrdecoder(m_gcTable,
(GcInfoDecoderFlags)( DECODE_SECURITY_OBJECT
| DECODE_CODE_LENGTH
| DECODE_GC_LIFETIMES
@@ -617,11 +617,11 @@ PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on th
//
#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
- GcInfoDecoder safePointDecoder(m_pbGCInfo, (GcInfoDecoderFlags)0, 0);
+ GcInfoDecoder safePointDecoder(m_gcTable, (GcInfoDecoderFlags)0, 0);
#endif
{
- GcInfoDecoder untrackedDecoder(m_pbGCInfo, DECODE_GC_LIFETIMES, 0);
+ GcInfoDecoder untrackedDecoder(m_gcTable, DECODE_GC_LIFETIMES, 0);
untrackedDecoder.EnumerateUntrackedSlots(&regdisp,
0,
&LivePointerCallback,
@@ -646,7 +646,7 @@ PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on th
{
BOOL fNewInterruptible = FALSE;
- GcInfoDecoder decoder1(m_pbGCInfo,
+ GcInfoDecoder decoder1(m_gcTable,
(GcInfoDecoderFlags)( DECODE_SECURITY_OBJECT
| DECODE_CODE_LENGTH
| DECODE_VARARG
@@ -680,7 +680,7 @@ PORTABILITY_ASSERT("GcInfoDumper::EnumerateStateChanges is not implemented on th
}
#endif
- GcInfoDecoder decoder2(m_pbGCInfo,
+ GcInfoDecoder decoder2(m_gcTable,
(GcInfoDecoderFlags)( DECODE_SECURITY_OBJECT
| DECODE_CODE_LENGTH
| DECODE_VARARG
diff --git a/src/gcinfo/gcinfoencoder.cpp b/src/gcinfo/gcinfoencoder.cpp
index e6359c86c8..514a3c96be 100644
--- a/src/gcinfo/gcinfoencoder.cpp
+++ b/src/gcinfo/gcinfoencoder.cpp
@@ -12,10 +12,6 @@
#include "gcinfoencoder.h"
-#ifdef VERIFY_GCINFO
-#include "dbggcinfoencoder.h"
-#endif
-
#ifdef _DEBUG
#ifndef LOGGING
#define LOGGING
@@ -446,9 +442,6 @@ GcInfoEncoder::GcInfoEncoder(
m_Info2( pJitAllocator ),
m_InterruptibleRanges( pJitAllocator ),
m_LifetimeTransitions( pJitAllocator )
-#ifdef VERIFY_GCINFO
- , m_DbgEncoder(pCorJitInfo, pMethodInfo, pJitAllocator)
-#endif
{
#ifdef MEASURE_GCINFO
// This causes multiple complus.log files in JIT64. TODO: consider using ICorJitInfo::logMsg instead.
@@ -552,11 +545,6 @@ GcSlotId GcInfoEncoder::GetRegisterSlotId( UINT32 regNum, GcSlotFlags flags )
GcSlotId newSlotId;
newSlotId = m_NumSlots++;
-#ifdef VERIFY_GCINFO
- GcSlotId dbgSlotId = m_DbgEncoder.GetRegisterSlotId(regNum, flags);
- _ASSERTE(dbgSlotId == newSlotId);
-#endif
-
return newSlotId;
}
@@ -590,11 +578,6 @@ GcSlotId GcInfoEncoder::GetStackSlotId( INT32 spOffset, GcSlotFlags flags, GcSta
GcSlotId newSlotId;
newSlotId = m_NumSlots++;
-#ifdef VERIFY_GCINFO
- GcSlotId dbgSlotId = m_DbgEncoder.GetStackSlotId(spOffset, flags, spBase);
- _ASSERTE(dbgSlotId == newSlotId);
-#endif
-
return newSlotId;
}
@@ -624,10 +607,6 @@ void GcInfoEncoder::WriteSlotStateVector(BitStreamWriter &writer, const BitArray
void GcInfoEncoder::DefineInterruptibleRange( UINT32 startInstructionOffset, UINT32 length )
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.DefineInterruptibleRange(startInstructionOffset, length);
-#endif
-
UINT32 stopInstructionOffset = startInstructionOffset + length;
UINT32 normStartOffset = NORMALIZE_CODE_OFFSET(startInstructionOffset);
@@ -674,10 +653,6 @@ void GcInfoEncoder::SetSlotState(
{
_ASSERTE( (m_SlotTable[ slotId ].Flags & GC_SLOT_UNTRACKED) == 0 );
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetSlotState(instructionOffset, slotId, slotState);
-#endif
-
LifetimeTransition transition;
transition.SlotId = slotId;
@@ -693,19 +668,11 @@ void GcInfoEncoder::SetSlotState(
void GcInfoEncoder::SetIsVarArg()
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetIsVarArg();
-#endif
-
m_IsVarArg = true;
}
void GcInfoEncoder::SetCodeLength( UINT32 length )
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetCodeLength(length);
-#endif
-
_ASSERTE( length > 0 );
_ASSERTE( m_CodeLength == 0 || m_CodeLength == length );
m_CodeLength = length;
@@ -714,10 +681,6 @@ void GcInfoEncoder::SetCodeLength( UINT32 length )
void GcInfoEncoder::SetSecurityObjectStackSlot( INT32 spOffset )
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetSecurityObjectStackSlot(spOffset);
-#endif
-
_ASSERTE( spOffset != NO_SECURITY_OBJECT );
#if defined(_TARGET_AMD64_)
_ASSERTE( spOffset < 0x10 && "The security object cannot reside in an input variable!" );
@@ -751,10 +714,6 @@ void GcInfoEncoder::SetGSCookieStackSlot( INT32 spOffsetGSCookie, UINT32 validRa
void GcInfoEncoder::SetPSPSymStackSlot( INT32 spOffsetPSPSym )
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetPSPSymStackSlot(spOffsetPSPSym);
-#endif
-
_ASSERTE( spOffsetPSPSym != NO_PSP_SYM );
_ASSERTE( m_PSPSymStackSlot == NO_PSP_SYM || m_PSPSymStackSlot == spOffsetPSPSym );
@@ -763,10 +722,6 @@ void GcInfoEncoder::SetPSPSymStackSlot( INT32 spOffsetPSPSym )
void GcInfoEncoder::SetGenericsInstContextStackSlot( INT32 spOffsetGenericsContext, GENERIC_CONTEXTPARAM_TYPE type)
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetGenericsInstContextStackSlot(spOffsetGenericsContext);
-#endif
-
_ASSERTE( spOffsetGenericsContext != NO_GENERICS_INST_CONTEXT);
_ASSERTE( m_GenericsInstContextStackSlot == NO_GENERICS_INST_CONTEXT || m_GenericsInstContextStackSlot == spOffsetGenericsContext );
@@ -776,10 +731,6 @@ void GcInfoEncoder::SetGenericsInstContextStackSlot( INT32 spOffsetGenericsConte
void GcInfoEncoder::SetStackBaseRegister( UINT32 regNum )
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetStackBaseRegister(regNum);
-#endif
-
_ASSERTE( regNum != NO_STACK_BASE_REGISTER );
_ASSERTE(DENORMALIZE_STACK_BASE_REGISTER(NORMALIZE_STACK_BASE_REGISTER(regNum)) == regNum);
_ASSERTE( m_StackBaseRegister == NO_STACK_BASE_REGISTER || m_StackBaseRegister == regNum );
@@ -788,10 +739,6 @@ void GcInfoEncoder::SetStackBaseRegister( UINT32 regNum )
void GcInfoEncoder::SetSizeOfEditAndContinuePreservedArea( UINT32 slots )
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetSizeOfEditAndContinuePreservedArea(slots);
-#endif
-
_ASSERTE( slots != NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA );
_ASSERTE( m_SizeOfEditAndContinuePreservedArea == NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA );
m_SizeOfEditAndContinuePreservedArea = slots;
@@ -805,10 +752,6 @@ void GcInfoEncoder::SetWantsReportOnlyLeaf()
#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
void GcInfoEncoder::SetSizeOfStackOutgoingAndScratchArea( UINT32 size )
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.SetSizeOfStackOutgoingAndScratchArea(size);
-#endif
-
_ASSERTE( size != (UINT32)-1 );
_ASSERTE( m_SizeOfStackOutgoingAndScratchArea == (UINT32)-1 || m_SizeOfStackOutgoingAndScratchArea == size );
m_SizeOfStackOutgoingAndScratchArea = size;
@@ -962,10 +905,6 @@ void BitStreamWriter::Write(BitArray& a, UINT32 count)
void GcInfoEncoder::FinalizeSlotIds()
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.FinalizeSlotIds();
-#endif
-
#ifdef _DEBUG
m_IsSlotTableFrozen = TRUE;
#endif
@@ -1030,10 +969,6 @@ bool GcInfoEncoder::IsAlwaysScratch(GcSlotDesc &slotDesc)
void GcInfoEncoder::Build()
{
-#ifdef VERIFY_GCINFO
- m_DbgEncoder.Build();
-#endif
-
#ifdef _DEBUG
_ASSERTE(m_IsSlotTableFrozen || m_NumSlots == 0);
#endif
@@ -2641,10 +2576,6 @@ BYTE* GcInfoEncoder::Emit()
size_t cbGcInfoSize = m_Info1.GetByteCount() +
m_Info2.GetByteCount();
-#ifdef VERIFY_GCINFO
- cbGcInfoSize += (sizeof(size_t)) + m_DbgEncoder.GetByteCount();
-#endif
-
LOG((LF_GCINFO, LL_INFO100, "GcInfoEncoder::Emit(): Size of GC info is %u bytes, code size %u bytes.\n", (unsigned)cbGcInfoSize, m_CodeLength ));
BYTE* destBuffer = (BYTE *)eeAllocGCInfo(cbGcInfoSize);
@@ -2654,16 +2585,6 @@ BYTE* GcInfoEncoder::Emit()
BYTE* ptr = destBuffer;
-#ifdef VERIFY_GCINFO
- _ASSERTE(sizeof(size_t) >= sizeof(UINT32));
- size_t __displacement = cbGcInfoSize - m_DbgEncoder.GetByteCount();
- ptr[0] = (BYTE)__displacement;
- ptr[1] = (BYTE) (__displacement >> 8);
- ptr[2] = (BYTE) (__displacement >> 16);
- ptr[3] = (BYTE) (__displacement >> 24);
- ptr += sizeof(size_t);
-#endif
-
m_Info1.CopyTo( ptr );
ptr += m_Info1.GetByteCount();
m_Info1.Dispose();
@@ -2676,11 +2597,6 @@ BYTE* GcInfoEncoder::Emit()
m_pAllocator->Free( m_SlotTable );
#endif
-#ifdef VERIFY_GCINFO
- _ASSERTE(ptr - destBuffer == __displacement);
- m_DbgEncoder.Emit(ptr);
-#endif
-
return destBuffer;
}
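For context on the deleted verification path: when VERIFY_GCINFO was defined, Emit prefixed
the blob with a little-endian displacement so the old, well-tested debug encoding could be
appended after the real one and compared at decode time. A hedged sketch of reading that
(now-removed) prefix back; the helper name is hypothetical:

    // Illustrative only: locate the trailing debug stream in the removed layout.
    // The displacement was stored in the first four bytes, little-endian, and the
    // debug copy of the GC info began at destBuffer + displacement.
    #include <cstdint>

    static const uint8_t* DbgInfoStart(const uint8_t* destBuffer) {
        uint32_t displacement = (uint32_t)destBuffer[0]
                              | ((uint32_t)destBuffer[1] << 8)
                              | ((uint32_t)destBuffer[2] << 16)
                              | ((uint32_t)destBuffer[3] << 24);
        return destBuffer + displacement;
    }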
diff --git a/src/inc/dbggcinfodecoder.h b/src/inc/dbggcinfodecoder.h
deleted file mode 100644
index 71b62637b7..0000000000
--- a/src/inc/dbggcinfodecoder.h
+++ /dev/null
@@ -1,343 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-/*****************************************************************
- *
- * GC Information Decoding API
- *
- * This is an older well-tested implementation
- * now used to verify the real encoding
- * Define VERIFY_GCINFO to enable the verification
- *
- *****************************************************************/
-
-#ifdef VERIFY_GCINFO
-
-#ifndef _DBG_GC_INFO_DECODER_
-#define _DBG_GC_INFO_DECODER_
-
-#include "daccess.h"
-
-#ifndef GCINFODECODER_NO_EE
-
-#include "eetwain.h"
-
-#else // GCINFODECODER_NO_EE
-
-#if !defined(_NTAMD64_)
-#include "clrnt.h"
-#endif
-
-// Misc. VM types:
-
-class Object;
-typedef Object *OBJECTREF;
-typedef SIZE_T TADDR;
-
-// Stuff from gc.h:
-
-#ifndef __GC_H
-
-#define GC_CALL_INTERIOR 0x1
-#define GC_CALL_PINNED 0x2
-
-#endif // !__GC_H
-
-
-// Stuff from check.h:
-
-#ifndef UNREACHABLE
-#define UNREACHABLE() __assume(0)
-#endif
-
-// Stuff from eetwain.h:
-
-#ifndef _EETWAIN_H
-
-typedef void (*GCEnumCallback)(
- LPVOID hCallback, // callback data
- OBJECTREF* pObject, // address of obect-reference we are reporting
- uint32_t flags // is this a pinned and/or interior pointer
-);
-
-
-#if !defined(_TARGET_X86_)
-#define USE_GC_INFO_DECODER
-#endif
-
-#include "regdisp.h"
-
-#endif // !_EETWAIN_H
-
-#endif // GCINFODECODER_NO_EE
-
-#include "gcinfotypes.h"
-
-
-namespace DbgGcInfo {
-
-struct GcSlotDesc
-{
- union
- {
- UINT32 RegisterNumber;
- GcStackSlot Stack;
- } Slot;
- GcSlotFlags Flags;
-};
-
-class BitStreamReader
-{
-public:
- BitStreamReader( const BYTE* pBuffer )
- {
- _ASSERTE( pBuffer != NULL );
- m_pBuffer = (PTR_BYTE)(TADDR)pBuffer;
- m_BitsRead = 0;
- }
-
- //
-    // bit 0 is the least significant bit
-    // count can be negative so that bits are written in most-significant to least-significant order
- //
- size_t Read( int numBits )
- {
- size_t result = 0;
- int curBitsRead = 0;
-
- while( curBitsRead < numBits )
- {
- int currByte = m_BitsRead /8;
- int currBitInCurrentByte = m_BitsRead % 8;
- int bitsLeftInCurrentByte = 8 - currBitInCurrentByte;
- _ASSERTE( bitsLeftInCurrentByte > 0 );
-
- int bitsToReadInCurrentByte = min( numBits - curBitsRead, bitsLeftInCurrentByte );
-
- size_t data = m_pBuffer[ currByte ];
- data >>= currBitInCurrentByte;
- data &= (1<<bitsToReadInCurrentByte) -1;
-
- data <<= curBitsRead;
- result |= data;
-
- curBitsRead += bitsToReadInCurrentByte;
- m_BitsRead += bitsToReadInCurrentByte;
- }
-
- return result;
- }
-
- // Returns the number of bits read so far
- size_t GetCurrentPos()
- {
- return m_BitsRead;
- }
-
- void SetCurrentPos( size_t pos )
- {
- m_BitsRead = pos;
- }
-
- // Can use negative values
- void Skip( SSIZE_T numBitsToSkip )
- {
- m_BitsRead += numBitsToSkip;
- _ASSERTE( m_BitsRead >= 0 );
- }
-
- //--------------------------------------------------------------------------
- // Decode variable length numbers
- // See the corresponding methods on BitStreamWriter for more information on the format
- //--------------------------------------------------------------------------
-
- inline size_t DecodeVarLengthUnsigned( int base )
- {
- _ASSERTE((base > 0) && (base < (int)sizeof(size_t)*8));
- size_t numEncodings = 1 << base;
- size_t result = 0;
- for(int shift=0; ; shift+=base)
- {
- _ASSERTE(shift+base <= (int)sizeof(size_t)*8);
-
- size_t currentChunk = Read(base+1);
- result |= (currentChunk & (numEncodings-1)) << shift;
- if(!(currentChunk & numEncodings))
- {
- // Extension bit is not set, we're done.
- return result;
- }
- }
- }
-
- inline SSIZE_T DecodeVarLengthSigned( int base )
- {
- _ASSERTE((base > 0) && (base < (int)sizeof(SSIZE_T)*8));
- size_t numEncodings = 1 << base;
- SSIZE_T result = 0;
- for(int shift=0; ; shift+=base)
- {
- _ASSERTE(shift+base <= (int)sizeof(SSIZE_T)*8);
-
- size_t currentChunk = Read(base+1);
- result |= (currentChunk & (numEncodings-1)) << shift;
- if(!(currentChunk & numEncodings))
- {
- // Extension bit is not set, sign-extend and we're done.
- int sbits = sizeof(SSIZE_T)*8 - (shift+base);
- result <<= sbits;
- result >>= sbits; // This provides the sign extension
- return result;
- }
- }
- }
-
-private:
- PTR_BYTE m_pBuffer;
- size_t m_BitsRead;
-};
-
-
-class GcInfoDecoder
-{
-public:
-
-    // If you are not interested in interruptibility or gc lifetime information, pass 0 as instructionOffset
- GcInfoDecoder(
- const BYTE* gcInfoAddr,
- GcInfoDecoderFlags flags,
- UINT32 instructionOffset = 0
- );
-
-
- //------------------------------------------------------------------------
- // Interruptibility
- //------------------------------------------------------------------------
-
- bool IsInterruptible();
-
- // Returns true to stop enumerating.
- typedef bool EnumerateInterruptibleRangesCallback (UINT32 startOffset, UINT32 stopOffset, LPVOID hCallback);
-
- void EnumerateInterruptibleRanges (
- EnumerateInterruptibleRangesCallback *pCallback,
- LPVOID hCallback);
-
- //------------------------------------------------------------------------
- // GC lifetime information
- //------------------------------------------------------------------------
-
- bool EnumerateLiveSlots(
- PREGDISPLAY pRD,
- bool reportScratchSlots,
- unsigned flags,
- GCEnumCallback pCallBack,
- LPVOID hCallBack
- );
-
- void VerifyLiveRegister(
- UINT32 regNum,
- GcSlotFlags flags
- );
-
-
- void VerifyLiveStackSlot(
- int spOffset,
- GcStackSlotBase spBase,
- GcSlotFlags flags
- );
-
- void DoFinalVerification();
-
- //------------------------------------------------------------------------
- // Miscellaneous method information
- //------------------------------------------------------------------------
-
- INT32 GetSecurityObjectStackSlot();
- INT32 GetPSPSymStackSlot();
- INT32 GetGenericsInstContextStackSlot();
- bool GetIsVarArg();
- UINT32 GetCodeLength();
- UINT32 GetStackBaseRegister();
- UINT32 GetSizeOfEditAndContinuePreservedArea();
-
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- UINT32 GetSizeOfStackParameterArea();
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-
-private:
- BitStreamReader m_Reader;
- UINT32 m_InstructionOffset;
-
- // Pre-decoded information
- bool m_IsInterruptible;
- bool m_IsVarArg;
- INT32 m_SecurityObjectStackSlot;
- INT32 m_PSPSymStackSlot;
- INT32 m_GenericsInstContextStackSlot;
- UINT32 m_CodeLength;
- UINT32 m_StackBaseRegister;
- UINT32 m_SizeOfEditAndContinuePreservedArea;
- UINT32 m_NumInterruptibleRanges;
-
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- UINT32 m_SizeOfStackOutgoingAndScratchArea;
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-
-#ifdef _DEBUG
- GcInfoDecoderFlags m_Flags;
-#endif
-
- GcSlotDesc* m_pLiveRegisters;
- GcSlotDesc* m_pLiveStackSlots;
- int m_NumLiveRegisters;
- int m_NumLiveStackSlots;
-
- CQuickBytes qbSlots1;
- CQuickBytes qbSlots2;
-
- static bool SetIsInterruptibleCB (UINT32 startOffset, UINT32 stopOffset, LPVOID hCallback);
-
- OBJECTREF* GetRegisterSlot(
- int regNum,
- PREGDISPLAY pRD
- );
-
- OBJECTREF* GetStackSlot(
- INT32 spOffset,
- GcStackSlotBase spBase,
- PREGDISPLAY pRD
- );
-
- bool IsScratchRegister(int regNum, PREGDISPLAY pRD);
- bool IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD);
-
- void ReportRegisterToGC(
- int regNum,
- BOOL isInterior,
- BOOL isPinned,
- PREGDISPLAY pRD,
- unsigned flags,
- GCEnumCallback pCallBack,
- LPVOID hCallBack
- );
-
- void ReportStackSlotToGC(
- INT32 spOffset,
- GcStackSlotBase spBase,
- BOOL isInterior,
- BOOL isPinned,
- PREGDISPLAY pRD,
- unsigned flags,
- GCEnumCallback pCallBack,
- LPVOID hCallBack
- );
-
-
-};
-
-}
-
-#endif // _DBG_GC_INFO_DECODER_
-#endif // VERIFY_GCINFO
-
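The deleted DecodeVarLengthUnsigned above consumes base payload bits plus one extension bit
per chunk until the extension bit is clear. A self-contained round trip of the same scheme
over a bit vector, assuming nothing from the deleted classes (names are illustrative):

    // Encode/decode the base+1-bit variable-length unsigned scheme: each chunk
    // carries 'base' payload bits; a set extension bit means another chunk follows.
    #include <cstddef>
    #include <vector>

    static void VarEncode(std::vector<int>& bits, size_t n, int base) {
        const size_t ext = (size_t)1 << base;
        for (;;) {
            size_t chunk = n & (ext - 1);
            n >>= base;
            size_t out = chunk | (n ? ext : 0);     // payload + extension bit
            for (int i = 0; i <= base; i++)
                bits.push_back((int)((out >> i) & 1));
            if (n == 0) return;
        }
    }

    static size_t VarDecode(const std::vector<int>& bits, int base) {
        const size_t ext = (size_t)1 << base;
        size_t result = 0, pos = 0;
        for (int shift = 0;; shift += base) {
            size_t chunk = 0;
            for (int i = 0; i <= base; i++)
                chunk |= (size_t)bits[pos++] << i;
            result |= (chunk & (ext - 1)) << shift;
            if (!(chunk & ext)) return result;      // extension bit clear: done
        }
    }

For instance, with base = 3 encoding 1000 emits three chunks with the extension bit set
plus a final one (16 bits total), and VarDecode recovers 1000 exactly.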
diff --git a/src/inc/dbggcinfoencoder.h b/src/inc/dbggcinfoencoder.h
deleted file mode 100644
index 85b56ad297..0000000000
--- a/src/inc/dbggcinfoencoder.h
+++ /dev/null
@@ -1,469 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-/*****************************************************************************
- *
- * GC Information Encoding API
- *
- * This is an older well-tested implementation
- * now used to verify the real encoding
- * Define VERIFY_GCINFO to enable the verification
- *
- */
-
-#ifdef VERIFY_GCINFO
-
-#ifndef __DBGGCINFOENCODER_H__
-#define __DBGGCINFOENCODER_H__
-
-#include <windows.h>
-
-#include <wchar.h>
-#include <stdio.h>
-
-#include "corjit.h"
-#include "iallocator.h"
-#include "gcinfoarraylist.h"
-
-#include "stdmacros.h"
-#include "gcinfotypes.h"
-
-
-class IJitAllocator;
-
-
-
-namespace DbgGcInfo {
-
-//-----------------------------------------------------------------------------
-// The following macro controls whether the encoder has to call the IJitAllocator::Free method
-// Don't call IJitAllocator::Free for mscorjit64.dll
-//-----------------------------------------------------------------------------
-//#define MUST_CALL_JITALLOCATOR_FREE
-
-
-class BitStreamWriter
-{
-public:
- BitStreamWriter( IJitAllocator* pAllocator );
- void Write( size_t data, int count );
-
- inline size_t GetBitCount()
- {
- return m_BitCount;
- }
-
- inline size_t GetByteCount()
- {
- return ( m_BitCount + 7 ) / 8;
- }
-
-
- void CopyTo( BYTE* buffer );
- void Dispose();
-
- //--------------------------------------------------------
- // Encode variable length numbers
- // Uses base+1 bits at minimum
- // Bits 0..(base-1) represent the encoded quantity
- // If it doesn't fit, set bit #base to 1 and use base+1 more bits
- //--------------------------------------------------------
- int EncodeVarLengthUnsigned( size_t n, int base )
- {
- _ASSERTE((base > 0) && (base < sizeof(size_t)*8));
- size_t numEncodings = 1 << base;
- int bitsUsed = base+1;
- for( ; ; bitsUsed += base+1)
- {
- if( n < numEncodings )
- {
- Write( n, base+1 ); // This sets the extension bit to zero
- return bitsUsed;
- }
- else
- {
- size_t currentChunk = n & (numEncodings-1);
- Write( currentChunk | numEncodings, base+1 );
- n >>= base;
- }
- }
- return bitsUsed;
- }
-
- //--------------------------------------------------------
- // Signed quantities are encoded the same as unsigned
- // The most relevant difference is that a number is considered
- // to fit in base bits if the topmost bit of a base-long chunk
- // matches the sign of the whole number
- //--------------------------------------------------------
- int EncodeVarLengthSigned( SSIZE_T n, int base )
- {
- _ASSERTE((base > 0) && (base < sizeof(SSIZE_T)*8));
- size_t numEncodings = 1 << base;
- for(int bitsUsed = base+1; ; bitsUsed += base+1)
- {
- size_t currentChunk = ((size_t) n) & (numEncodings-1);
- size_t topmostBit = currentChunk & (numEncodings >> 1);
- n >>= base; // signed arithmetic shift
- if( topmostBit && (n == (SSIZE_T)-1) || !topmostBit && (n == 0))
- {
- // The topmost bit correctly represents the sign
- Write( currentChunk, base+1 ); // This sets the extension bit to zero
- return bitsUsed;
- }
- else
- {
- Write( currentChunk | numEncodings, base+1 );
- }
- }
- }
-
-private:
-
- class MemoryBlockList;
- class MemoryBlock
- {
- friend class MemoryBlockList;
- MemoryBlock* m_next;
-
- public:
- size_t Contents[];
-
- inline MemoryBlock* Next()
- {
- return m_next;
- }
- };
-
- class MemoryBlockList
- {
- MemoryBlock* m_head;
- MemoryBlock* m_tail;
-
- public:
- MemoryBlockList();
-
- inline MemoryBlock* Head()
- {
- return m_head;
- }
-
- MemoryBlock* AppendNew(IAllocator* allocator, size_t bytes);
- void Dispose(IAllocator* allocator);
- };
-
- IJitAllocator* m_pAllocator;
- size_t m_BitCount;
- int m_FreeBitsInCurrentSlot;
- MemoryBlockList m_MemoryBlocks;
- const static int m_MemoryBlockSize = 512; // must be a multiple of the pointer size
- size_t* m_pCurrentSlot; // bits are written through this pointer
- size_t* m_OutOfBlockSlot; // sentinel value to determine when the block is full
-#ifdef _DEBUG
- int m_MemoryBlocksCount;
-#endif
-
-private:
- // Writes bits knowing that they will all fit in the current memory slot
- inline void WriteInCurrentSlot( size_t data, int count )
- {
- data &= SAFE_SHIFT_LEFT(1, count) - 1;
-
- data <<= (sizeof( size_t )*8-m_FreeBitsInCurrentSlot);
-
- *m_pCurrentSlot |= data;
- }
-
- void AllocMemoryBlock();
-
- inline void InitCurrentSlot()
- {
- m_FreeBitsInCurrentSlot = sizeof( size_t )*8;
- *m_pCurrentSlot = 0;
- }
-
-};
-
-struct GcSlotDesc
-{
- union
- {
- UINT32 RegisterNumber;
- GcStackSlot Stack;
- } Slot;
- unsigned IsRegister : 1;
- unsigned IsInterior : 1;
- unsigned IsPinned : 1;
-};
-
-
-
-typedef UINT32 GcSlotId;
-
-
-class GcSlotSet
-{
- friend class GcInfoEncoder;
-public:
- GcSlotSet( GcInfoEncoder * pEncoder );
-
- // Copy constructor
- GcSlotSet( GcSlotSet & other );
-
- inline void Add( GcSlotId slotId );
-
- inline void Remove( GcSlotId slotId );
-
-// Not used
-#if 0
- inline void RemoveAll();
-
- void Add( GcSlotSet & other );
- void Subtract( GcSlotSet & other );
- void Intersect( GcSlotSet & other );
-#endif
-
- // Must be called when done with the object
- inline void Dispose();
-
-private:
- // A bit vector representing the set
- BYTE * m_Data;
-
- int m_NumBytes;
-
- GcInfoEncoder* m_pEncoder;
-};
-
-
-class GcInfoEncoder
-{
-public:
- GcInfoEncoder(
- ICorJitInfo* pCorJitInfo,
- CORINFO_METHOD_INFO* pMethodInfo,
- IJitAllocator* pJitAllocator
- );
-
-
- //------------------------------------------------------------------------
- // Interruptibility
- //------------------------------------------------------------------------
-
- // An instruction at offset x will be interruptible
- // if-and-only-if startInstructionOffset <= x < startInstructionOffset+length
- void DefineInterruptibleRange( UINT32 startInstructionOffset, UINT32 length );
-
-
- //------------------------------------------------------------------------
- // Slot information
- //------------------------------------------------------------------------
-
- //
-    // spOffsets are always relative to the SP of the caller (same as SP at the method entry and exit)
- // Negative offsets describe GC refs in the local and outgoing areas.
- // Positive offsets describe GC refs in the scratch area
- // Note that if the dynamic allocation area is resized, the outgoing area will not be valid anymore
- // Old slots must be declared dead and new ones can be defined.
- // It's up to the JIT to do the right thing. We don't enforce this.
- //
-
- GcSlotId GetRegisterSlotId( UINT32 regNum, GcSlotFlags flags );
- GcSlotId GetStackSlotId( INT32 spOffset, GcSlotFlags flags, GcStackSlotBase spBase = GC_CALLER_SP_REL );
-
- //
- // After a FinalizeSlotIds is called, no more slot definitions can be made.
- // FinalizeSlotIds must be called once and only once before calling DefineGcStateAtCallSite
- // If no call sites are described, calling FinalizeSlotIds can and should (for performance reasons) be avoided
- //
- void FinalizeSlotIds();
-
-
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-
- //------------------------------------------------------------------------
- // Partially-interruptible information
- //------------------------------------------------------------------------
-
-
- void DefineGcStateAtSafePoint(
- UINT32 instructionOffset,
- GcSlotSet &liveSlots
- );
-
-#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-#endif
-
- //------------------------------------------------------------------------
- // Fully-interruptible information
- //------------------------------------------------------------------------
-
- //
- // For inputs, pass zero as offset
- //
-
- // This method defines what the GC state of a slot is when a thread's suspension IP
- // is equal to instructionOffset
-
- void SetSlotState( UINT32 instructionOffset,
- GcSlotId slotId,
- GcSlotState slotState
- );
-
-
-
- //------------------------------------------------------------------------
- // Miscellaneous method information
- //------------------------------------------------------------------------
-
- void SetSecurityObjectStackSlot( INT32 spOffset );
- void SetPSPSymStackSlot( INT32 spOffsetPSPSym );
- void SetGenericsInstContextStackSlot( INT32 spOffsetGenericsContext );
- void SetIsVarArg();
- void SetCodeLength( UINT32 length );
-
- // Optional in the general case. Required if the method uses GC_FRAMEREG_REL stack slots
- void SetStackBaseRegister( UINT32 registerNumber );
- void SetSizeOfEditAndContinuePreservedArea( UINT32 size );
-
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- void SetSizeOfStackOutgoingAndScratchArea( UINT32 size );
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-
-
- //------------------------------------------------------------------------
- // Encoding
- //------------------------------------------------------------------------
-
- //
- // Build() encodes GC information into temporary buffers.
- // The method description cannot change after Build is called
- //
- void Build();
-
- //
-    // Writes encoded information to its final destination and frees temporary buffers.
-    // The encoder shouldn't be used anymore after calling this method.
-    // It returns a pointer to the destination buffer, whose address is byte-aligned
- //
- size_t GetByteCount();
- BYTE* Emit(BYTE* dest);
-
-private:
-
- friend class LifetimeTransitionsQuickSort;
- friend class LifetimeTransitionsQuickSortByOffset;
-
- struct LifetimeTransition
- {
- UINT32 CodeOffset;
- GcSlotDesc SlotDesc;
- bool BecomesLive;
- };
-
- ICorJitInfo* m_pCorJitInfo;
- CORINFO_METHOD_INFO* m_pMethodInfo;
- IJitAllocator* m_pAllocator;
-
-#ifdef _DEBUG
- char *m_MethodName, *m_ModuleName;
-#endif
-
- BitStreamWriter m_HeaderInfoWriter;
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
- BitStreamWriter m_PartiallyInterruptibleInfoWriter;
-#endif
-#endif
- BitStreamWriter m_FullyInterruptibleInfoWriter;
-
- GcInfoArrayList<LifetimeTransition, 64> m_LifetimeTransitions;
- LifetimeTransition *m_rgSortedTransitions;
-
- bool m_IsVarArg;
- INT32 m_SecurityObjectStackSlot;
- INT32 m_PSPSymStackSlot;
- INT32 m_GenericsInstContextStackSlot;
- UINT32 m_CodeLength;
- UINT32 m_StackBaseRegister;
- UINT32 m_SizeOfEditAndContinuePreservedArea;
- UINT32 m_LastInterruptibleRangeStopOffset;
- UINT32 m_NumInterruptibleRanges;
-
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- UINT32 m_SizeOfStackOutgoingAndScratchArea;
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-
- void * eeAllocGCInfo (size_t blockSize);
-
- inline int EncodeFullyInterruptibleSlotFlags(GcSlotDesc slotDesc)
- {
- int flagEnc = 1;
- if( slotDesc.IsInterior )
- flagEnc |= 0x2;
- if( slotDesc.IsPinned )
- flagEnc |= 0x4;
- if(flagEnc == 1)
- {
- m_FullyInterruptibleInfoWriter.Write(0, 1);
- return 1;
- }
- else
- {
- m_FullyInterruptibleInfoWriter.Write(flagEnc, 3);
- return 3;
- }
- }
-
-
-private:
-
- friend class GcSlotSet;
- friend class EncoderCheckState;
-
- static const UINT32 m_MappingTableInitialSize = 32;
- UINT32 m_MappingTableSize;
- UINT32 m_NumSlotMappings;
- GcSlotDesc *m_SlotMappings;
-
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
- UINT32 m_NumSafePointsWithGcState;
-#endif
-#endif
-
- void GrowMappingTable();
-
-#ifdef _DEBUG
- bool m_IsMappingTableFrozen;
-#endif
-};
-
-
-
-// Not used
-#if 0
-
-void GcSlotSet::RemoveAll()
-{
- ZeroMemory( m_Data, m_NumBytes );
-}
-
-#endif
-
-
-void GcSlotSet::Dispose()
-{
-#ifdef MUST_CALL_JITALLOCATOR_FREE
- m_pEncoder->m_pAllocator->Free( m_Data );
-#endif
-}
-
-
-}
-
-#endif // !__DBGGCINFOENCODER_H__
-
-#endif // VERIFY_GCINFO
-
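The signed variant above stops once the topmost payload bit of a chunk matches the sign of
the remaining value, so a decoder must sign-extend from the bits actually read. A minimal
sketch of that final step (illustrative, mirroring the deleted DecodeVarLengthSigned):

    // After the last chunk, the value occupies 'usedBits' low bits; shift the
    // sign bit to the top and arithmetic-shift back down to sign-extend.
    #include <cstddef>

    static ptrdiff_t SignExtend(size_t raw, int usedBits) {
        int sbits = (int)(sizeof(size_t) * 8) - usedBits;
        return ((ptrdiff_t)(raw << sbits)) >> sbits;
    }

For example, with base = 3 a single chunk 0b0110 (payload 6, extension clear) decodes as
SignExtend(6, 3) == -2: the topmost payload bit is 1, so it stands in for the sign, which
is exactly the condition EncodeVarLengthSigned tested before stopping.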
diff --git a/src/inc/eetwain.h b/src/inc/eetwain.h
index a7bab8701e..6e183c5546 100644
--- a/src/inc/eetwain.h
+++ b/src/inc/eetwain.h
@@ -30,6 +30,7 @@
#include "corjit.h" // For NativeVarInfo
#include "stackwalktypes.h"
#include "bitvector.h"
+#include "gcinfotypes.h"
#if !defined(_TARGET_X86_)
#define USE_GC_INFO_DECODER
@@ -218,7 +219,7 @@ virtual bool IsGcSafe(EECodeInfo *pCodeInfo,
*/
virtual unsigned FindEndOfLastInterruptibleRegion(unsigned curOffset,
unsigned endOffset,
- PTR_VOID methodInfoPtr) = 0;
+ GCInfoToken gcInfoToken) = 0;
#endif // _TARGET_AMD64_ && _DEBUG
/*
@@ -293,7 +294,7 @@ virtual bool IsInSynchronizedRegion(
not take procedure splitting into account). For the actual size of
the hot region call IJitManager::JitTokenToMethodHotSize.
*/
-virtual size_t GetFunctionSize(PTR_VOID methodInfoPtr) = 0;
+virtual size_t GetFunctionSize(GCInfoToken gcInfoToken) = 0;
/*
Returns the size of the frame (barring localloc)
@@ -447,7 +448,7 @@ bool IsGcSafe( EECodeInfo *pCodeInfo,
virtual
unsigned FindEndOfLastInterruptibleRegion(unsigned curOffset,
unsigned endOffset,
- PTR_VOID methodInfoPtr);
+ GCInfoToken gcInfoToken);
#endif // _TARGET_AMD64_ && _DEBUG
/*
@@ -551,8 +552,7 @@ bool IsInSynchronizedRegion(
Returns the size of a given function.
*/
virtual
-size_t GetFunctionSize(
- PTR_VOID methodInfoPtr);
+size_t GetFunctionSize(GCInfoToken gcInfoToken);
/*
Returns the size of the frame (barring localloc)
diff --git a/src/inc/gcdecoder.cpp b/src/inc/gcdecoder.cpp
index 7472c9aa62..d337faeebc 100644
--- a/src/inc/gcdecoder.cpp
+++ b/src/inc/gcdecoder.cpp
@@ -18,7 +18,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
/* This file is shared between the VM and JIT/IL and SOS/Strike directories */
-#include "gcinfo.h"
+#include "gcinfotypes.h"
/*****************************************************************************/
/*
diff --git a/src/inc/gcdump.h b/src/inc/gcdump.h
index aded6bb102..cd73940ded 100644
--- a/src/inc/gcdump.h
+++ b/src/inc/gcdump.h
@@ -17,7 +17,7 @@
#define __GCDUMP_H__
/*****************************************************************************/
-#include "gcinfo.h" // For InfoHdr
+#include "gcinfotypes.h" // For InfoHdr
#ifndef FASTCALL
#ifndef FEATURE_PAL
@@ -32,7 +32,8 @@ class GCDump
{
public:
- GCDump (bool encBytes = true,
+ GCDump (UINT32 gcInfoVersion,
+ bool encBytes = true,
unsigned maxEncBytes = 5,
bool dumpCodeOffs = true);
@@ -44,7 +45,7 @@ public:
* Return value : Size in bytes of the header encoding
*/
- unsigned FASTCALL DumpInfoHdr (PTR_CBYTE table,
+ unsigned FASTCALL DumpInfoHdr (PTR_CBYTE gcInfoBlock,
InfoHdr * header, /* OUT */
unsigned * methodSize, /* OUT */
bool verifyGCTables = false);
@@ -52,13 +53,12 @@ public:
/*-------------------------------------------------------------------------
* Dumps the GC tables to 'stdout'
- * table : Ptr to the start of the table part of the GC info.
- * This immediately follows the GCinfo header
+ * table : The GCInfoToken
* verifyGCTables : If the JIT has been compiled with VERIFY_GC_TABLES
* Return value : Size in bytes of the GC table encodings
*/
- size_t FASTCALL DumpGCTable (PTR_CBYTE table,
+ size_t FASTCALL DumpGCTable (PTR_CBYTE gcInfoBlock,
#ifdef _TARGET_X86_
const InfoHdr& header,
#endif
@@ -79,6 +79,7 @@ public:
public:
typedef void (*printfFtn)(const char* fmt, ...);
printfFtn gcPrintf;
+ UINT32 gcInfoVersion;
//-------------------------------------------------------------------------
protected:
@@ -89,7 +90,7 @@ protected:
/* Helper methods */
- PTR_CBYTE DumpEncoding(PTR_CBYTE table,
+ PTR_CBYTE DumpEncoding(PTR_CBYTE gcInfoBlock,
int cDumpBytes);
void DumpOffset (unsigned o);
void DumpOffsetEx(unsigned o);
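Since GCDump now takes the GC info version up front, callers are expected to thread it
through from the code manager. A hedged sketch of constructing the dumper with the
signature shown above (the printf sink is whatever the tool already uses):

    // Illustrative only: version-aware construction matching the new ctor.
    GCDump gcDump(GCINFO_VERSION, // gcInfoVersion: 1 for JITted code today
                  true,           // encBytes: dump the raw encoding bytes
                  5,              // maxEncBytes
                  true);          // dumpCodeOffs
    // gcDump.gcPrintf must be pointed at a printf-style function before dumping.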
diff --git a/src/inc/gcinfo.h b/src/inc/gcinfo.h
index bb80620f31..500e1b7a02 100644
--- a/src/inc/gcinfo.h
+++ b/src/inc/gcinfo.h
@@ -8,11 +8,8 @@
#define _GCINFO_H_
/*****************************************************************************/
-#include <stdlib.h> // For memcmp()
-#include "windef.h" // For BYTE
#include "daccess.h"
-
-#include "bitvector.h" // for ptrArgTP
+#include "windef.h" // For BYTE
// Some declarations in this file are used on non-x86 platforms, but most are x86-specific.
@@ -31,234 +28,32 @@ const unsigned byref_OFFSET_FLAG = 0x1; // the offset is an interior ptr
const unsigned pinned_OFFSET_FLAG = 0x2; // the offset is a pinned ptr
const unsigned this_OFFSET_FLAG = 0x2; // the offset is "this"
-#ifdef _TARGET_X86_
-
-#ifndef FASTCALL
-#define FASTCALL __fastcall
-#endif
+//-----------------------------------------------------------------------------
+// The current GCInfo Version
+//-----------------------------------------------------------------------------
-// we use offsetof to get the offset of a field
-#include <stddef.h> // offsetof
-#ifndef offsetof
-#define offsetof(s,m) ((size_t)&(((s *)0)->m))
-#endif
-
-enum infoHdrAdjustConstants {
- // Constants
- SET_FRAMESIZE_MAX = 7,
- SET_ARGCOUNT_MAX = 8, // Change to 6
- SET_PROLOGSIZE_MAX = 16,
- SET_EPILOGSIZE_MAX = 10, // Change to 6
- SET_EPILOGCNT_MAX = 4,
- SET_UNTRACKED_MAX = 3
-};
+#define GCINFO_VERSION 1
+//-----------------------------------------------------------------------------
+// GCInfoToken: A wrapper that contains the GcInfo data and version number.
//
-// Enum to define the 128 codes that are used to incrementally adjust the InfoHdr structure
+// The version# is not stored in the GcInfo structure -- because it would be
+// wasteful to store the version once for every method.
+// Instead, the version# is tracked per range-section of generated/loaded methods.
//
-enum infoHdrAdjust {
-
- SET_FRAMESIZE = 0, // 0x00
- SET_ARGCOUNT = SET_FRAMESIZE + SET_FRAMESIZE_MAX + 1, // 0x08
- SET_PROLOGSIZE = SET_ARGCOUNT + SET_ARGCOUNT_MAX + 1, // 0x11
- SET_EPILOGSIZE = SET_PROLOGSIZE + SET_PROLOGSIZE_MAX + 1, // 0x22
- SET_EPILOGCNT = SET_EPILOGSIZE + SET_EPILOGSIZE_MAX + 1, // 0x2d
- SET_UNTRACKED = SET_EPILOGCNT + (SET_EPILOGCNT_MAX + 1) * 2, // 0x37
-
- FIRST_FLIP = SET_UNTRACKED + SET_UNTRACKED_MAX + 1,
-
- FLIP_EDI_SAVED = FIRST_FLIP, // 0x3b
- FLIP_ESI_SAVED, // 0x3c
- FLIP_EBX_SAVED, // 0x3d
- FLIP_EBP_SAVED, // 0x3e
- FLIP_EBP_FRAME, // 0x3f
- FLIP_INTERRUPTIBLE, // 0x40
- FLIP_DOUBLE_ALIGN, // 0x41
- FLIP_SECURITY, // 0x42
- FLIP_HANDLERS, // 0x43
- FLIP_LOCALLOC, // 0x44
- FLIP_EDITnCONTINUE, // 0x45
-    FLIP_VAR_PTR_TABLE_SZ,      // 0x46 Flip whether a table-size exists after the header encoding
- FFFF_UNTRACKED_CNT, // 0x47 There is a count (>SET_UNTRACKED_MAX) after the header encoding
- FLIP_VARARGS, // 0x48
- FLIP_PROF_CALLBACKS, // 0x49
- FLIP_HAS_GS_COOKIE, // 0x4A - The offset of the GuardStack cookie follows after the header encoding
- FLIP_SYNC, // 0x4B
- FLIP_HAS_GENERICS_CONTEXT,// 0x4C
- FLIP_GENERICS_CONTEXT_IS_METHODDESC,// 0x4D
-
- // 0x4E .. 0x4f unused
-
- NEXT_FOUR_START = 0x50,
- NEXT_FOUR_FRAMESIZE = 0x50,
- NEXT_FOUR_ARGCOUNT = 0x60,
- NEXT_THREE_PROLOGSIZE = 0x70,
- NEXT_THREE_EPILOGSIZE = 0x78
-};
-
-#define HAS_UNTRACKED ((unsigned int) -1)
-#define HAS_VARPTR ((unsigned int) -1)
-// 0 is not a valid offset for EBP-frames as all locals are at a negative offset
-// For ESP frames, the cookie is above (at a higher address than) the buffers,
-// and so cannot be at offset 0.
-#define INVALID_GS_COOKIE_OFFSET 0
-// Temporary value to indicate that the offset needs to be read after the header
-#define HAS_GS_COOKIE_OFFSET ((unsigned int) -1)
-
-// 0 is not a valid sync offset
-#define INVALID_SYNC_OFFSET 0
-// Temporary value to indicate that the offset needs to be read after the header
-#define HAS_SYNC_OFFSET ((unsigned int) -1)
-
-#define INVALID_ARGTAB_OFFSET 0
-
-#include <pshpack1.h>
-
-// Working set optimization: saving 12 * 128 = 1536 bytes in infoHdrShortcut
-struct InfoHdr;
-
-struct InfoHdrSmall {
- unsigned char prologSize; // 0
- unsigned char epilogSize; // 1
- unsigned char epilogCount : 3; // 2 [0:2]
- unsigned char epilogAtEnd : 1; // 2 [3]
- unsigned char ediSaved : 1; // 2 [4] which callee-saved regs are pushed onto stack
- unsigned char esiSaved : 1; // 2 [5]
- unsigned char ebxSaved : 1; // 2 [6]
- unsigned char ebpSaved : 1; // 2 [7]
- unsigned char ebpFrame : 1; // 3 [0] locals accessed relative to ebp
- unsigned char interruptible : 1; // 3 [1] is intr. at all points (except prolog/epilog), not just call-sites
- unsigned char doubleAlign : 1; // 3 [2] uses double-aligned stack (ebpFrame will be false)
- unsigned char security : 1; // 3 [3] has slot for security object
- unsigned char handlers : 1; // 3 [4] has callable handlers
- unsigned char localloc : 1; // 3 [5] uses localloc
- unsigned char editNcontinue : 1; // 3 [6] was JITed in EnC mode
- unsigned char varargs : 1; // 3 [7] function uses varargs calling convention
- unsigned char profCallbacks : 1; // 4 [0]
- unsigned char genericsContext : 1;//4 [1] function reports a generics context parameter is present
- unsigned char genericsContextIsMethodDesc : 1;//4[2]
- unsigned short argCount; // 5,6 in bytes
- unsigned int frameSize; // 7,8,9,10 in bytes
- unsigned int untrackedCnt; // 11,12,13,14
-    unsigned int varPtrTableSize;   // 15,16,17,18
-
- // Checks whether "this" is compatible with "target".
- // It is not an exact bit match as "this" could have some
- // marker/place-holder values, which will have to be written out
- // after the header.
-
- bool isHeaderMatch(const InfoHdr& target) const;
-};
-
-
-struct InfoHdr : public InfoHdrSmall {
- // 0 (zero) means that there is no GuardStack cookie
- // The cookie is either at ESP+gsCookieOffset or EBP-gsCookieOffset
- unsigned int gsCookieOffset; // 19,20,21,22
- unsigned int syncStartOffset; // 23,24,25,26
- unsigned int syncEndOffset; // 27,28,29,30
-
- // 31 bytes total
-
- // Checks whether "this" is compatible with "target".
- // It is not an exact bit match as "this" could have some
- // marker/place-holder values, which will have to be written out
- // after the header.
-
- bool isHeaderMatch(const InfoHdr& target) const
- {
-#ifdef _ASSERTE
- // target cannot have place-holder values.
- _ASSERTE(target.untrackedCnt != HAS_UNTRACKED &&
- target.varPtrTableSize != HAS_VARPTR &&
- target.gsCookieOffset != HAS_GS_COOKIE_OFFSET &&
- target.syncStartOffset != HAS_SYNC_OFFSET);
-#endif
-
- // compare two InfoHdr's up to but not including the untrackCnt field
- if (memcmp(this, &target, offsetof(InfoHdr, untrackedCnt)) != 0)
- return false;
-
- if (untrackedCnt != target.untrackedCnt) {
- if (target.untrackedCnt <= SET_UNTRACKED_MAX)
- return false;
- else if (untrackedCnt != HAS_UNTRACKED)
- return false;
- }
-
- if (varPtrTableSize != target.varPtrTableSize) {
- if ((varPtrTableSize != 0) != (target.varPtrTableSize != 0))
- return false;
- }
-
- if ((gsCookieOffset == INVALID_GS_COOKIE_OFFSET) !=
- (target.gsCookieOffset == INVALID_GS_COOKIE_OFFSET))
- return false;
-
- if ((syncStartOffset == INVALID_SYNC_OFFSET) !=
- (target.syncStartOffset == INVALID_SYNC_OFFSET))
- return false;
-
- return true;
- }
-};
-
-
-union CallPattern {
- struct {
- unsigned char argCnt;
- unsigned char regMask; // EBP=0x8, EBX=0x4, ESI=0x2, EDI=0x1
- unsigned char argMask;
- unsigned char codeDelta;
- } fld;
- unsigned val;
-};
-
-#include <poppack.h>
-
-#define IH_MAX_PROLOG_SIZE (51)
-
-extern const InfoHdrSmall infoHdrShortcut[];
-extern int infoHdrLookup[];
-
-inline void GetInfoHdr(int index, InfoHdr * header)
+// The GCInfo version is computed as:
+// 1) The current GCINFO_VERSION for JITted and NGened images
+// 2) A function of the ready-to-run major version stored in READYTORUN_HEADER
+//    for ready-to-run images. ReadyToRunJitManager::JitTokenToGCInfoVersion()
+// provides the GcInfo version for any method. Currently, there's only one
+// version of GCInfo.
+//-----------------------------------------------------------------------------
+
+struct GCInfoToken
{
- * ((InfoHdrSmall *) header) = infoHdrShortcut[index];
-
- header->gsCookieOffset = 0;
- header->syncStartOffset = 0;
- header->syncEndOffset = 0;
-}
-
-PTR_CBYTE FASTCALL decodeHeader(PTR_CBYTE table, InfoHdr* header);
-
-BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more, int *pCached);
-BYTE FASTCALL encodeHeaderNext (const InfoHdr& header, InfoHdr* state);
-
-size_t FASTCALL decodeUnsigned (PTR_CBYTE src, unsigned* value);
-size_t FASTCALL decodeUDelta (PTR_CBYTE src, unsigned* value, unsigned lastValue);
-size_t FASTCALL decodeSigned (PTR_CBYTE src, int * value);
-
-#define CP_MAX_CODE_DELTA (0x23)
-#define CP_MAX_ARG_CNT (0x02)
-#define CP_MAX_ARG_MASK (0x00)
-
-extern const unsigned callPatternTable[];
-extern const unsigned callCommonDelta[];
-
-
-int FASTCALL lookupCallPattern(unsigned argCnt,
- unsigned regMask,
- unsigned argMask,
- unsigned codeDelta);
-
-void FASTCALL decodeCallPattern(int pattern,
- unsigned * argCnt,
- unsigned * regMask,
- unsigned * argMask,
- unsigned * codeDelta);
-
-#endif // _TARGET_86_ || _TARGET_ARM_
+ PTR_VOID Info;
+ UINT32 Version;
+};
/*****************************************************************************/
#endif //_GCINFO_H_
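Everything x86-specific that used to live here moves to gcinfotypes.h (see that hunk
below); what remains is the version constant and the token. A hedged sketch of how a token
is formed and consumed, using only the types in this hunk and the decoder constructor shown
in the gcinfodecoder.h hunk that follows:

    // Illustrative only: wrap a JIT-produced GC info blob in a GCInfoToken.
    // JITted and NGened code use GCINFO_VERSION directly; ready-to-run images
    // derive the version from their READYTORUN_HEADER instead.
    GCInfoToken MakeJitGcInfoToken(PTR_VOID pGcInfo)
    {
        GCInfoToken token;
        token.Info = pGcInfo;
        token.Version = GCINFO_VERSION;
        return token;
    }

    // ...later, e.g.: GcInfoDecoder decoder(token, DECODE_CODE_LENGTH);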
diff --git a/src/inc/gcinfodecoder.h b/src/inc/gcinfodecoder.h
index 52e8ed8b62..f703727a76 100644
--- a/src/inc/gcinfodecoder.h
+++ b/src/inc/gcinfodecoder.h
@@ -11,7 +11,7 @@
#ifndef _GC_INFO_DECODER_
#define _GC_INFO_DECODER_
-#include "daccess.h"
+#include "gcinfotypes.h"
#define _max(a, b) (((a) > (b)) ? (a) : (b))
#define _min(a, b) (((a) < (b)) ? (a) : (b))
@@ -176,10 +176,6 @@ enum GcInfoDecoderFlags
DECODE_EDIT_AND_CONTINUE = 0x800,
};
-#ifdef VERIFY_GCINFO
-#include "dbggcinfodecoder.h"
-#endif
-
enum GcInfoHeaderFlags
{
GC_INFO_IS_VARARG = 0x1,
@@ -433,12 +429,11 @@ public:
    // If you are not interested in interruptibility or gc lifetime information, pass 0 as instructionOffset
GcInfoDecoder(
- PTR_CBYTE gcInfoAddr,
+ GCInfoToken gcInfoToken,
GcInfoDecoderFlags flags,
UINT32 instructionOffset = 0
);
-
//------------------------------------------------------------------------
// Interruptibility
//------------------------------------------------------------------------
@@ -538,12 +533,9 @@ private:
#ifdef _DEBUG
GcInfoDecoderFlags m_Flags;
PTR_CBYTE m_GcInfoAddress;
+ UINT32 m_Version;
#endif
-#ifdef VERIFY_GCINFO
- DbgGcInfo::GcInfoDecoder m_DbgDecoder;
-#endif
-
static bool SetIsInterruptibleCB (UINT32 startOffset, UINT32 stopOffset, LPVOID hCallback);
OBJECTREF* GetRegisterSlot(
@@ -617,13 +609,6 @@ private:
UINT32 regNum = pSlot->Slot.RegisterNumber;
if( reportScratchSlots || !IsScratchRegister( regNum, pRD ) )
{
-#ifdef VERIFY_GCINFO
- m_DbgDecoder.VerifyLiveRegister(
- regNum,
- pSlot->Flags
- );
-#endif
-
ReportRegisterToGC(
regNum,
pSlot->Flags,
@@ -644,14 +629,6 @@ private:
GcStackSlotBase spBase = pSlot->Slot.Stack.Base;
if( reportScratchSlots || !IsScratchStackSlot(spOffset, spBase, pRD) )
{
-#ifdef VERIFY_GCINFO
- m_DbgDecoder.VerifyLiveStackSlot(
- spOffset,
- spBase,
- pSlot->Flags
- );
-#endif
-
ReportStackSlotToGC(
spOffset,
spBase,
diff --git a/src/inc/gcinfodumper.h b/src/inc/gcinfodumper.h
index 64801b06c2..296dd29543 100644
--- a/src/inc/gcinfodumper.h
+++ b/src/inc/gcinfodumper.h
@@ -18,7 +18,7 @@ class GcInfoDumper
{
public:
- GcInfoDumper (PTR_CBYTE pbGCInfo);
+ GcInfoDumper (GCInfoToken gcInfoToken);
~GcInfoDumper ();
// Returns TRUE to stop decoding.
@@ -80,7 +80,7 @@ private:
UINT marked;
};
- PTR_CBYTE m_pbGCInfo;
+ GCInfoToken m_gcTable;
UINT32 m_StackBaseRegister;
UINT32 m_SizeOfEditAndContinuePreservedArea;
LivePointerRecord *m_pRecords;
diff --git a/src/inc/gcinfoencoder.h b/src/inc/gcinfoencoder.h
index ff03138032..8875d3b492 100644
--- a/src/inc/gcinfoencoder.h
+++ b/src/inc/gcinfoencoder.h
@@ -87,10 +87,6 @@
#include "gcinfotypes.h"
-#ifdef VERIFY_GCINFO
-#include "dbggcinfoencoder.h"
-#endif //VERIFY_GCINFO
-
#ifdef MEASURE_GCINFO
struct GcInfoSize
{
@@ -526,10 +522,6 @@ private:
bool m_IsSlotTableFrozen;
#endif
-#ifdef VERIFY_GCINFO
- DbgGcInfo::GcInfoEncoder m_DbgEncoder;
-#endif
-
#ifdef MEASURE_GCINFO
GcInfoSize m_CurrentMethodSize;
#endif
diff --git a/src/inc/gcinfotypes.h b/src/inc/gcinfotypes.h
index a54cec30e5..fc624b2c0a 100644
--- a/src/inc/gcinfotypes.h
+++ b/src/inc/gcinfotypes.h
@@ -6,6 +6,8 @@
#ifndef __GCINFOTYPES_H__
#define __GCINFOTYPES_H__
+#include "gcinfo.h"
+
// This file is included when building an "alt jit". In that case, we are doing a cross-compile:
// we may be building the ARM jit on x86, for example. We generally make that work by conditionalizing on
// a _TARGET_XXX_ variable that we explicitly set in the build, rather than the _XXX_ variable implicitly
@@ -62,19 +64,19 @@
__forceinline size_t SAFE_SHIFT_LEFT(size_t x, size_t count)
{
_ASSERTE(count <= BITS_PER_SIZE_T);
- return (x << 1) << (count-1);
+ return (x << 1) << (count - 1);
}
__forceinline size_t SAFE_SHIFT_RIGHT(size_t x, size_t count)
{
_ASSERTE(count <= BITS_PER_SIZE_T);
- return (x >> 1) >> (count-1);
+ return (x >> 1) >> (count - 1);
}
inline UINT32 CeilOfLog2(size_t x)
{
_ASSERTE(x > 0);
- UINT32 result = (x & (x-1)) ? 1 : 0;
- while(x != 1)
+ UINT32 result = (x & (x - 1)) ? 1 : 0;
+ while (x != 1)
{
result++;
x >>= 1;
@@ -84,24 +86,24 @@ inline UINT32 CeilOfLog2(size_t x)
enum GcSlotFlags
{
- GC_SLOT_BASE = 0x0,
- GC_SLOT_INTERIOR = 0x1,
- GC_SLOT_PINNED = 0x2,
- GC_SLOT_UNTRACKED = 0x4,
+ GC_SLOT_BASE = 0x0,
+ GC_SLOT_INTERIOR = 0x1,
+ GC_SLOT_PINNED = 0x2,
+ GC_SLOT_UNTRACKED = 0x4,
// For internal use by the encoder/decoder
- GC_SLOT_IS_REGISTER = 0x8,
- GC_SLOT_IS_DELETED = 0x10,
+ GC_SLOT_IS_REGISTER = 0x8,
+ GC_SLOT_IS_DELETED = 0x10,
};
enum GcStackSlotBase
{
- GC_CALLER_SP_REL = 0x0,
- GC_SP_REL = 0x1,
- GC_FRAMEREG_REL = 0x2,
+ GC_CALLER_SP_REL = 0x0,
+ GC_SP_REL = 0x1,
+ GC_FRAMEREG_REL = 0x2,
- GC_SPBASE_FIRST = GC_CALLER_SP_REL,
- GC_SPBASE_LAST = GC_FRAMEREG_REL,
+ GC_SPBASE_FIRST = GC_CALLER_SP_REL,
+ GC_SPBASE_LAST = GC_FRAMEREG_REL,
};
#ifdef _DEBUG
@@ -113,11 +115,10 @@ const char* const GcStackSlotBaseNames[] =
};
#endif
-
enum GcSlotState
{
- GC_SLOT_DEAD = 0x0,
- GC_SLOT_LIVE = 0x1,
+ GC_SLOT_DEAD = 0x0,
+ GC_SLOT_LIVE = 0x1,
};
struct GcStackSlot
@@ -135,6 +136,238 @@ struct GcStackSlot
}
};
+#ifdef _TARGET_X86_
+
+#include <stdlib.h> // For memcmp()
+#include "bitvector.h" // for ptrArgTP
+
+#ifndef FASTCALL
+#define FASTCALL __fastcall
+#endif
+
+// we use offsetof to get the offset of a field
+#include <stddef.h> // offsetof
+#ifndef offsetof
+#define offsetof(s,m) ((size_t)&(((s *)0)->m))
+#endif
+
+enum infoHdrAdjustConstants {
+ // Constants
+ SET_FRAMESIZE_MAX = 7,
+ SET_ARGCOUNT_MAX = 8, // Change to 6
+ SET_PROLOGSIZE_MAX = 16,
+ SET_EPILOGSIZE_MAX = 10, // Change to 6
+ SET_EPILOGCNT_MAX = 4,
+ SET_UNTRACKED_MAX = 3
+};
+
+//
+// Enum to define the 128 codes that are used to incrementally adjust the InfoHdr structure
+//
+enum infoHdrAdjust {
+
+ SET_FRAMESIZE = 0, // 0x00
+ SET_ARGCOUNT = SET_FRAMESIZE + SET_FRAMESIZE_MAX + 1, // 0x08
+ SET_PROLOGSIZE = SET_ARGCOUNT + SET_ARGCOUNT_MAX + 1, // 0x11
+ SET_EPILOGSIZE = SET_PROLOGSIZE + SET_PROLOGSIZE_MAX + 1, // 0x22
+ SET_EPILOGCNT = SET_EPILOGSIZE + SET_EPILOGSIZE_MAX + 1, // 0x2d
+ SET_UNTRACKED = SET_EPILOGCNT + (SET_EPILOGCNT_MAX + 1) * 2, // 0x37
+
+ FIRST_FLIP = SET_UNTRACKED + SET_UNTRACKED_MAX + 1,
+
+ FLIP_EDI_SAVED = FIRST_FLIP, // 0x3b
+ FLIP_ESI_SAVED, // 0x3c
+ FLIP_EBX_SAVED, // 0x3d
+ FLIP_EBP_SAVED, // 0x3e
+ FLIP_EBP_FRAME, // 0x3f
+ FLIP_INTERRUPTIBLE, // 0x40
+ FLIP_DOUBLE_ALIGN, // 0x41
+ FLIP_SECURITY, // 0x42
+ FLIP_HANDLERS, // 0x43
+ FLIP_LOCALLOC, // 0x44
+ FLIP_EDITnCONTINUE, // 0x45
+    FLIP_VAR_PTR_TABLE_SZ,      // 0x46 Flip whether a table-size exists after the header encoding
+ FFFF_UNTRACKED_CNT, // 0x47 There is a count (>SET_UNTRACKED_MAX) after the header encoding
+ FLIP_VARARGS, // 0x48
+ FLIP_PROF_CALLBACKS, // 0x49
+ FLIP_HAS_GS_COOKIE, // 0x4A - The offset of the GuardStack cookie follows after the header encoding
+ FLIP_SYNC, // 0x4B
+ FLIP_HAS_GENERICS_CONTEXT,// 0x4C
+ FLIP_GENERICS_CONTEXT_IS_METHODDESC,// 0x4D
+
+ // 0x4E .. 0x4f unused
+
+ NEXT_FOUR_START = 0x50,
+ NEXT_FOUR_FRAMESIZE = 0x50,
+ NEXT_FOUR_ARGCOUNT = 0x60,
+ NEXT_THREE_PROLOGSIZE = 0x70,
+ NEXT_THREE_EPILOGSIZE = 0x78
+};
+
+#define HAS_UNTRACKED ((unsigned int) -1)
+#define HAS_VARPTR ((unsigned int) -1)
+// 0 is not a valid offset for EBP-frames as all locals are at a negative offset
+// For ESP frames, the cookie is above (at a higher address than) the buffers,
+// and so cannot be at offset 0.
+#define INVALID_GS_COOKIE_OFFSET 0
+// Temporary value to indicate that the offset needs to be read after the header
+#define HAS_GS_COOKIE_OFFSET ((unsigned int) -1)
+
+// 0 is not a valid sync offset
+#define INVALID_SYNC_OFFSET 0
+// Temporary value to indicate that the offset needs to be read after the header
+#define HAS_SYNC_OFFSET ((unsigned int) -1)
+
+#define INVALID_ARGTAB_OFFSET 0
+
+#include <pshpack1.h>
+
+// Working set optimization: saving 12 * 128 = 1536 bytes in infoHdrShortcut
+struct InfoHdr;
+
+struct InfoHdrSmall {
+ unsigned char prologSize; // 0
+ unsigned char epilogSize; // 1
+ unsigned char epilogCount : 3; // 2 [0:2]
+ unsigned char epilogAtEnd : 1; // 2 [3]
+ unsigned char ediSaved : 1; // 2 [4] which callee-saved regs are pushed onto stack
+ unsigned char esiSaved : 1; // 2 [5]
+ unsigned char ebxSaved : 1; // 2 [6]
+ unsigned char ebpSaved : 1; // 2 [7]
+ unsigned char ebpFrame : 1; // 3 [0] locals accessed relative to ebp
+ unsigned char interruptible : 1; // 3 [1] is intr. at all points (except prolog/epilog), not just call-sites
+ unsigned char doubleAlign : 1; // 3 [2] uses double-aligned stack (ebpFrame will be false)
+ unsigned char security : 1; // 3 [3] has slot for security object
+ unsigned char handlers : 1; // 3 [4] has callable handlers
+ unsigned char localloc : 1; // 3 [5] uses localloc
+ unsigned char editNcontinue : 1; // 3 [6] was JITed in EnC mode
+ unsigned char varargs : 1; // 3 [7] function uses varargs calling convention
+ unsigned char profCallbacks : 1; // 4 [0]
+ unsigned char genericsContext : 1;//4 [1] function reports a generics context parameter is present
+ unsigned char genericsContextIsMethodDesc : 1;//4[2]
+ unsigned short argCount; // 5,6 in bytes
+ unsigned int frameSize; // 7,8,9,10 in bytes
+ unsigned int untrackedCnt; // 11,12,13,14
+    unsigned int varPtrTableSize;   // 15,16,17,18
+
+ // Checks whether "this" is compatible with "target".
+ // It is not an exact bit match as "this" could have some
+ // marker/place-holder values, which will have to be written out
+ // after the header.
+
+ bool isHeaderMatch(const InfoHdr& target) const;
+};
+
+
+struct InfoHdr : public InfoHdrSmall {
+ // 0 (zero) means that there is no GuardStack cookie
+ // The cookie is either at ESP+gsCookieOffset or EBP-gsCookieOffset
+ unsigned int gsCookieOffset; // 19,20,21,22
+ unsigned int syncStartOffset; // 23,24,25,26
+ unsigned int syncEndOffset; // 27,28,29,30
+
+ // 31 bytes total
+
+ // Checks whether "this" is compatible with "target".
+ // It is not an exact bit match as "this" could have some
+ // marker/place-holder values, which will have to be written out
+ // after the header.
+
+ bool isHeaderMatch(const InfoHdr& target) const
+ {
+#ifdef _ASSERTE
+ // target cannot have place-holder values.
+ _ASSERTE(target.untrackedCnt != HAS_UNTRACKED &&
+ target.varPtrTableSize != HAS_VARPTR &&
+ target.gsCookieOffset != HAS_GS_COOKIE_OFFSET &&
+ target.syncStartOffset != HAS_SYNC_OFFSET);
+#endif
+
+ // compare two InfoHdr's up to but not including the untrackCnt field
+ if (memcmp(this, &target, offsetof(InfoHdr, untrackedCnt)) != 0)
+ return false;
+
+ if (untrackedCnt != target.untrackedCnt) {
+ if (target.untrackedCnt <= SET_UNTRACKED_MAX)
+ return false;
+ else if (untrackedCnt != HAS_UNTRACKED)
+ return false;
+ }
+
+ if (varPtrTableSize != target.varPtrTableSize) {
+ if ((varPtrTableSize != 0) != (target.varPtrTableSize != 0))
+ return false;
+ }
+
+ if ((gsCookieOffset == INVALID_GS_COOKIE_OFFSET) !=
+ (target.gsCookieOffset == INVALID_GS_COOKIE_OFFSET))
+ return false;
+
+ if ((syncStartOffset == INVALID_SYNC_OFFSET) !=
+ (target.syncStartOffset == INVALID_SYNC_OFFSET))
+ return false;
+
+ return true;
+ }
+};
+
+
+union CallPattern {
+ struct {
+ unsigned char argCnt;
+ unsigned char regMask; // EBP=0x8, EBX=0x4, ESI=0x2, EDI=0x1
+ unsigned char argMask;
+ unsigned char codeDelta;
+ } fld;
+ unsigned val;
+};
+
+#include <poppack.h>
+
+#define IH_MAX_PROLOG_SIZE (51)
+
+extern const InfoHdrSmall infoHdrShortcut[];
+extern int infoHdrLookup[];
+
+inline void GetInfoHdr(int index, InfoHdr * header)
+{
+ *((InfoHdrSmall *)header) = infoHdrShortcut[index];
+
+ header->gsCookieOffset = 0;
+ header->syncStartOffset = 0;
+ header->syncEndOffset = 0;
+}
+
+PTR_CBYTE FASTCALL decodeHeader(PTR_CBYTE table, InfoHdr* header);
+
+BYTE FASTCALL encodeHeaderFirst(const InfoHdr& header, InfoHdr* state, int* more, int *pCached);
+BYTE FASTCALL encodeHeaderNext(const InfoHdr& header, InfoHdr* state);
+
+size_t FASTCALL decodeUnsigned(PTR_CBYTE src, unsigned* value);
+size_t FASTCALL decodeUDelta(PTR_CBYTE src, unsigned* value, unsigned lastValue);
+size_t FASTCALL decodeSigned(PTR_CBYTE src, int * value);
+
+#define CP_MAX_CODE_DELTA (0x23)
+#define CP_MAX_ARG_CNT (0x02)
+#define CP_MAX_ARG_MASK (0x00)
+
+extern const unsigned callPatternTable[];
+extern const unsigned callCommonDelta[];
+
+
+int FASTCALL lookupCallPattern(unsigned argCnt,
+ unsigned regMask,
+ unsigned argMask,
+ unsigned codeDelta);
+
+void FASTCALL decodeCallPattern(int pattern,
+ unsigned * argCnt,
+ unsigned * regMask,
+ unsigned * argMask,
+ unsigned * codeDelta);
+
+#endif // _TARGET_86_
+
// Stack offsets must be 8-byte aligned, so we use this unaligned
// offset to represent that the method doesn't have a security object
#define NO_SECURITY_OBJECT (-1)
@@ -144,7 +377,6 @@ struct GcStackSlot
#define NO_GENERICS_INST_CONTEXT (-1)
#define NO_PSP_SYM (-1)
-
#if defined(_TARGET_AMD64_)
#ifndef TARGET_POINTER_SIZE
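A quick check on the helpers reformatted at the top of this hunk, assuming the definitions
exactly as shown (BITS_PER_SIZE_T is the platform word width):

    // CeilOfLog2 rounds up: the count starts at 1 whenever x is not a power of two.
    // SAFE_SHIFT_LEFT splits a full-width shift as (x << 1) << (count - 1) to avoid
    // the undefined behavior of shifting by the operand's full bit width.
    #include <cassert>

    void CheckGcInfoHelpers()
    {
        assert(CeilOfLog2(8) == 3);                       // exact power of two
        assert(CeilOfLog2(9) == 4);                       // rounds up: 2^4 >= 9
        assert(SAFE_SHIFT_LEFT(1, BITS_PER_SIZE_T) == 0); // full-width shift -> 0
    }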
diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
index 1196e610ac..ffd5b70c8f 100755
--- a/src/jit/codegencommon.cpp
+++ b/src/jit/codegencommon.cpp
@@ -1398,6 +1398,29 @@ regNumber CodeGenInterface::genGetThisArgReg(GenTreePtr call)
return REG_ARG_0;
}
+//----------------------------------------------------------------------
+// getSpillTempDsc: get the TempDsc corresponding to a spilled tree.
+//
+// Arguments:
+// tree - spilled GenTree node
+//
+// Return Value:
+// TempDsc corresponding to tree
+TempDsc* CodeGenInterface::getSpillTempDsc(GenTree* tree)
+{
+ // tree must be in spilled state.
+ assert((tree->gtFlags & GTF_SPILLED) != 0);
+
+ // Get the tree's SpillDsc.
+ RegSet::SpillDsc* prevDsc;
+ RegSet::SpillDsc* spillDsc = regSet.rsGetSpillInfo(tree, tree->gtRegNum, &prevDsc);
+ assert(spillDsc != nullptr);
+
+ // Get the temp desc.
+ TempDsc* temp = regSet.rsGetSpillTempWord(tree->gtRegNum, spillDsc, prevDsc);
+ return temp;
+}
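
A hypothetical call site, with stand-in types so the shape compiles on its own (none of these stubs are CLR APIs): the lookup is paired with a release of the temp once the consuming instruction has been emitted.

struct TempDsc { int num; };
struct Node    { bool containedSpillTemp; };

TempDsc* getSpillTempDscStub(Node*) { static TempDsc t = { -1 }; return &t; }
void     tmpRlsTempStub(TempDsc*)   { /* return the slot to the free list */ }

void emitFromSpillTemp(Node* operand)
{
    if (operand->containedSpillTemp)              // models isContainedSpillTemp()
    {
        TempDsc* temp = getSpillTempDscStub(operand);
        // ... emit an instruction form that addresses temp's stack slot ...
        tmpRlsTempStub(temp);                     // release at the last use
    }
}
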
+
#ifdef _TARGET_XARCH_
#ifdef _TARGET_AMD64_
diff --git a/src/jit/codegeninterface.h b/src/jit/codegeninterface.h
index cb4774d240..d321b5719a 100644
--- a/src/jit/codegeninterface.h
+++ b/src/jit/codegeninterface.h
@@ -313,6 +313,9 @@ public:
void SpillFloat (regNumber reg, bool bIsCall = false);
#endif // LEGACY_BACKEND
+    // The following method is used by the xarch emitter for handling contained tree temps.
+ TempDsc* getSpillTempDsc(GenTree* tree);
+
public:
emitter* getEmitter() { return m_cgEmitter; }
protected:
diff --git a/src/jit/codegenxarch.cpp b/src/jit/codegenxarch.cpp
index 4483a4f94c..5fea9fc583 100755
--- a/src/jit/codegenxarch.cpp
+++ b/src/jit/codegenxarch.cpp
@@ -1341,7 +1341,7 @@ void CodeGen::genCodeForDivMod(GenTreeOp* treeNode)
{
emit->emitInsBinary(genGetInsForOper(treeNode->gtOper, targetType), size, treeNode, divisor);
}
- else if (divisor->gtRegNum == targetReg)
+ else if (!divisor->isContained() && divisor->gtRegNum == targetReg)
{
// It is not possible to generate 2-operand divss or divsd where reg2 = reg1 / reg2
// because divss/divsd reg1, reg2 will over-write reg1. Therefore, in case of AMD64
@@ -1466,8 +1466,8 @@ void CodeGen::genCodeForBinary(GenTree* treeNode)
// The arithmetic node must be sitting in a register (since it's not contained)
noway_assert(targetReg != REG_NA);
- regNumber op1reg = op1->gtRegNum;
- regNumber op2reg = op2->gtRegNum;
+ regNumber op1reg = op1->isContained() ? REG_NA: op1->gtRegNum;
+ regNumber op2reg = op2->isContained() ? REG_NA: op2->gtRegNum;
GenTreePtr dst;
GenTreePtr src;
@@ -5170,7 +5170,11 @@ void CodeGen::genConsumeRegs(GenTree* tree)
if (tree->isContained())
{
- if (tree->isIndir())
+ if (tree->isContainedSpillTemp())
+ {
+        // spill temps are untracked and hence there is no need to update life
+ }
+ else if (tree->isIndir())
{
genConsumeAddress(tree->AsIndir()->Addr());
}
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 324a19de62..033837e172 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -2006,12 +2006,16 @@ public:
GenTreePtr gtWalkOpEffectiveVal(GenTreePtr op);
#endif
- void gtPrepareCost (GenTree * tree);
- bool gtIsLikelyRegVar(GenTree * tree);
+ void gtPrepareCost (GenTree* tree);
+ bool gtIsLikelyRegVar(GenTree* tree);
unsigned gtSetEvalOrderAndRestoreFPstkLevel(GenTree * tree);
- unsigned gtSetEvalOrder (GenTree * tree);
+ // Returns true iff the secondNode can be swapped with firstNode.
+ bool gtCanSwapOrder (GenTree* firstNode,
+ GenTree* secondNode);
+
+ unsigned gtSetEvalOrder (GenTree* tree);
#if FEATURE_STACK_FP_X87
bool gtFPstLvlRedo;
@@ -4772,7 +4776,7 @@ private:
fgWalkResult fgMorphStructField(GenTreePtr tree, fgWalkData *fgWalkPre);
fgWalkResult fgMorphLocalField(GenTreePtr tree, fgWalkData *fgWalkPre);
void fgMarkImplicitByRefArgs();
- bool fgMorphImplicitByRefArgs(GenTreePtr tree, fgWalkData *fgWalkPre);
+ bool fgMorphImplicitByRefArgs(GenTree** pTree, fgWalkData *fgWalkPre);
static fgWalkPreFn fgMarkAddrTakenLocalsPreCB;
static fgWalkPostFn fgMarkAddrTakenLocalsPostCB;
void fgMarkAddressExposedLocals();
@@ -5405,8 +5409,9 @@ protected :
//
void optCSE_GetMaskData (GenTreePtr tree, optCSE_MaskData* pMaskData);
- // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2
- bool optCSE_canSwap (GenTreePtr tree);
+ // Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2.
+ bool optCSE_canSwap(GenTree* firstNode, GenTree* secondNode);
+ bool optCSE_canSwap(GenTree* tree);
static fgWalkPostFn optPropagateNonCSE;
static fgWalkPreFn optHasNonCSEChild;
diff --git a/src/jit/emitxarch.cpp b/src/jit/emitxarch.cpp
index 388a51d82d..632cc023e5 100644
--- a/src/jit/emitxarch.cpp
+++ b/src/jit/emitxarch.cpp
@@ -2893,7 +2893,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
{
dblConst = src->AsDblCon();
}
-
+
// find local field if any
GenTreeLclFld* lclField = nullptr;
if (src->isContainedLclField())
@@ -2918,9 +2918,25 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
lclVar = dst->AsLclVar();
}
+ // find contained spill tmp if any
+ TempDsc* tmpDsc = nullptr;
+ if (src->isContainedSpillTemp())
+ {
+ assert(src->IsRegOptional());
+ tmpDsc = codeGen->getSpillTempDsc(src);
+ }
+ else if (dst->isContainedSpillTemp())
+ {
+ assert(dst->IsRegOptional());
+ tmpDsc = codeGen->getSpillTempDsc(dst);
+ }
+
// First handle the simple non-memory cases
//
- if ((mem == nullptr) && (lclField == nullptr) && (lclVar == nullptr))
+ if ((mem == nullptr) &&
+ (lclField == nullptr) &&
+ (lclVar == nullptr) &&
+ (tmpDsc == nullptr))
{
if (intConst != nullptr)
{
@@ -2959,7 +2975,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
// Next handle the cases where we have a stack based local memory operand.
//
unsigned varNum = BAD_VAR_NUM;
- unsigned offset = (unsigned) -1;
+ unsigned offset = (unsigned)-1;
if (lclField != nullptr)
{
@@ -2971,12 +2987,22 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
varNum = lclVar->AsLclVarCommon()->GetLclNum();
offset = 0;
}
+ else if (tmpDsc != nullptr)
+ {
+ varNum = tmpDsc->tdTempNum();
+ offset = 0;
+ }
- if (varNum != BAD_VAR_NUM)
+ // Spill temp numbers are negative and start with -1
+ // which also happens to be BAD_VAR_NUM. For this reason
+ // we also need to check 'tmpDsc != nullptr' here.
+ if (varNum != BAD_VAR_NUM ||
+ tmpDsc != nullptr)
{
// Is the memory op in the source position?
if (src->isContainedLclField() ||
- src->isContainedLclVar())
+ src->isContainedLclVar() ||
+ src->isContainedSpillTemp())
{
if (instrHasImplicitRegPairDest(ins))
{
@@ -2993,7 +3019,7 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
}
else // The memory op is in the dest position.
{
- assert(dst->gtRegNum == REG_NA);
+ assert(dst->gtRegNum == REG_NA || dst->IsRegOptional());
// src could be int or reg
if (src->isContainedIntOrIImmed())
@@ -3011,6 +3037,11 @@ regNumber emitter::emitInsBinary(instruction ins, emitAttr attr, GenTree* dst, G
}
}
+ if (tmpDsc != nullptr)
+ {
+ emitComp->tmpRlsTemp(tmpDsc);
+ }
+
return dst->gtRegNum;
}
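
The comment about negative spill temp numbers is worth pinning down with a standalone check (the sentinel value is assumed to match the real definition): the first spill temp number, -1, widens to the same bit pattern as BAD_VAR_NUM, so varNum alone cannot distinguish "no memory operand" from "first spill temp".

#include <cassert>

int main()
{
    const unsigned BAD_VAR_NUM = (unsigned)-1; // assumed "no local var" sentinel
    int spillTempNum = -1;                     // spill temps count down from -1
    // Once widened to unsigned, the first spill temp aliases the sentinel,
    // hence the extra 'tmpDsc != nullptr' test above:
    assert((unsigned)spillTempNum == BAD_VAR_NUM);
    return 0;
}
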
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index ed2e6f2377..fa2c396ecc 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -6771,13 +6771,23 @@ bool Compiler::fgIsCommaThrow(GenTreePtr tree,
return false;
}
-
+//------------------------------------------------------------------------
+// fgIsIndirOfAddrOfLocal: Determine whether "tree" is an indirection of a local.
+//
+// Arguments:
+// tree - The tree node under consideration
+//
+// Return Value:
+// If "tree" is a indirection (GT_IND, GT_BLK, or GT_OBJ) whose arg is an ADDR,
+// whose arg in turn is a LCL_VAR, return that LCL_VAR node, else nullptr.
+//
+// static
GenTreePtr Compiler::fgIsIndirOfAddrOfLocal(GenTreePtr tree)
{
GenTreePtr res = nullptr;
- if (tree->OperGet() == GT_OBJ || tree->OperIsIndir())
+ if (tree->OperIsIndir())
{
- GenTreePtr addr = tree->gtOp.gtOp1;
+ GenTreePtr addr = tree->AsIndir()->Addr();
// Post rationalization, we can have Indir(Lea(..) trees. Therefore to recognize
// Indir of addr of a local, skip over Lea in Indir(Lea(base, index, scale, offset))
diff --git a/src/jit/gcencode.cpp b/src/jit/gcencode.cpp
index c3ae12bcf4..fb033ddfae 100644
--- a/src/jit/gcencode.cpp
+++ b/src/jit/gcencode.cpp
@@ -21,7 +21,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif
-#include "gcinfo.h"
+#include "gcinfotypes.h"
#ifdef JIT32_GCENCODER
@@ -3236,7 +3236,7 @@ unsigned GCInfo::gcInfoBlockHdrDump(const BYTE* table,
InfoHdr* header,
unsigned* methodSize)
{
- GCDump gcDump;
+ GCDump gcDump(GCINFO_VERSION);
gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
printf("Method info block:\n");
@@ -3252,7 +3252,7 @@ unsigned GCInfo::gcDumpPtrTable(const BYTE* table,
{
printf("Pointer table:\n");
- GCDump gcDump;
+ GCDump gcDump(GCINFO_VERSION);
gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
return gcDump.DumpGCTable(table, header, methodSize, verifyGCTables);
@@ -3268,7 +3268,7 @@ void GCInfo::gcFindPtrsInFrame(const void* infoBlock,
const void* codeBlock,
unsigned offs)
{
- GCDump gcDump;
+ GCDump gcDump(GCINFO_VERSION);
gcDump.gcPrintf = gcDump_logf; // use my printf (which logs to VM)
gcDump.DumpPtrsInFrame((const BYTE*)infoBlock, (const BYTE*)codeBlock, offs, verifyGCTables);
diff --git a/src/jit/gentree.cpp b/src/jit/gentree.cpp
index 30f25b2c57..5c7b49a55a 100644
--- a/src/jit/gentree.cpp
+++ b/src/jit/gentree.cpp
@@ -2978,6 +2978,71 @@ bool Compiler::gtIsLikelyRegVar(GenTree * tree)
return true;
}
+//------------------------------------------------------------------------
+// gtCanSwapOrder: Returns true iff the secondNode can be swapped with firstNode.
+//
+// Arguments:
+// firstNode - An operand of a tree that can have GTF_REVERSE_OPS set.
+// secondNode - The other operand of the tree.
+//
+// Return Value:
+// Returns a boolean indicating whether it is safe to reverse the execution
+// order of the two trees, considering any exception, global effects, or
+// ordering constraints.
+//
+bool
+Compiler::gtCanSwapOrder(GenTree* firstNode, GenTree* secondNode)
+{
+    // Relative order of global / side effects can't be swapped.
+
+ bool canSwap = true;
+
+ if (optValnumCSE_phase)
+ {
+ canSwap = optCSE_canSwap(firstNode, secondNode);
+ }
+
+ // We cannot swap in the presence of special side effects such as GT_CATCH_ARG.
+
+ if (canSwap &&
+ (firstNode->gtFlags & GTF_ORDER_SIDEEFF))
+ {
+ canSwap = false;
+ }
+
+ // When strict side effect order is disabled we allow GTF_REVERSE_OPS to be set
+ // when one or both sides contains a GTF_CALL or GTF_EXCEPT.
+ // Currently only the C and C++ languages allow non strict side effect order.
+
+ unsigned strictEffects = GTF_GLOB_EFFECT;
+
+ if (canSwap &&
+ (firstNode->gtFlags & strictEffects))
+ {
+        // op1 has side effects that can't be reordered.
+ // Check for some special cases where we still may be able to swap.
+
+ if (secondNode->gtFlags & strictEffects)
+ {
+            // op2 also has non-reorderable side effects - can't swap.
+ canSwap = false;
+ }
+ else
+ {
+            // No side effects in op2 - we can swap iff op1 has no way of modifying op2
+            // (i.e. through byref assignments or calls), or op2 is a constant.
+
+ if (firstNode->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS)
+ {
+ // We have to be conservative - can swap iff op2 is constant.
+ if (!secondNode->OperIsConst())
+ canSwap = false;
+ }
+ }
+ }
+ return canSwap;
+}
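
Restated as a standalone predicate (the flag values below are simplified stand-ins for the real GTF_* masks, and the CSE-phase check is omitted), the rule reduces to four ordered tests:

enum : unsigned
{
    ORDER_SIDEEFF           = 0x1, // stands in for GTF_ORDER_SIDEEFF
    GLOB_EFFECT             = 0x2, // stands in for GTF_GLOB_EFFECT
    PERSISTENT_SIDE_EFFECTS = 0x4, // subset of GLOB_EFFECT in the real flags
};

bool canSwapSketch(unsigned firstFlags, unsigned secondFlags, bool secondIsConst)
{
    if (firstFlags & ORDER_SIDEEFF)      return false;  // ordering is pinned
    if ((firstFlags & GLOB_EFFECT) == 0) return true;   // first side is reorderable
    if (secondFlags & GLOB_EFFECT)       return false;  // both sides have effects
    if (firstFlags & PERSISTENT_SIDE_EFFECTS)           // first may write memory:
        return secondIsConst;                           // only a constant is safe
    return true;                                        // e.g. first may only throw
}
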
+
/*****************************************************************************
*
* Given a tree, figure out the order in which its sub-operands should be
@@ -3543,17 +3608,18 @@ COMMON_CNS:
unsigned mul;
#endif
unsigned cns;
- GenTreePtr adr;
+ GenTreePtr base;
GenTreePtr idx;
/* See if we can form a complex addressing mode? */
- if (codeGen->genCreateAddrMode(op1, // address
+ GenTreePtr addr = op1;
+ if (codeGen->genCreateAddrMode(addr, // address
0, // mode
false, // fold
RBM_NONE, // reg mask
&rev, // reverse ops
- &adr, // base addr
+ &base, // base addr
&idx, // index val
#if SCALED_ADDR_MODES
&mul, // scaling
@@ -3564,17 +3630,17 @@ COMMON_CNS:
// We can form a complex addressing mode, so mark each of the interior
// nodes with GTF_ADDRMODE_NO_CSE and calculate a more accurate cost.
- op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
#ifdef _TARGET_XARCH_
// addrmodeCount is the count of items that we used to form
// an addressing mode. The maximum value is 4 when we have
- // all of these: { adr, idx, cns, mul }
+ // all of these: { base, idx, cns, mul }
//
unsigned addrmodeCount = 0;
- if (adr)
+ if (base)
{
- costEx += adr->gtCostEx;
- costSz += adr->gtCostSz;
+ costEx += base->gtCostEx;
+ costSz += base->gtCostSz;
addrmodeCount++;
}
@@ -3604,7 +3670,7 @@ COMMON_CNS:
// / \ --
// GT_ADD 'cns' -- reduce this interior GT_ADD by (-2,-2)
// / \ --
- // 'adr' GT_LSL -- reduce this interior GT_LSL by (-1,-1)
+ // 'base' GT_LSL -- reduce this interior GT_LSL by (-1,-1)
// / \ --
// 'idx' 'mul'
//
@@ -3614,7 +3680,7 @@ COMMON_CNS:
//
addrmodeCount--;
- GenTreePtr tmp = op1;
+ GenTreePtr tmp = addr;
while (addrmodeCount > 0)
{
// decrement the gtCosts for the interior GT_ADD or GT_LSH node by the remaining addrmodeCount
@@ -3627,7 +3693,7 @@ COMMON_CNS:
GenTreePtr tmpOp2 = tmp->gtGetOp2();
assert(tmpOp2 != nullptr);
- if ((tmpOp1 != adr) && (tmpOp1->OperGet() == GT_ADD))
+ if ((tmpOp1 != base) && (tmpOp1->OperGet() == GT_ADD))
{
tmp = tmpOp1;
}
@@ -3653,11 +3719,11 @@ COMMON_CNS:
}
}
#elif defined _TARGET_ARM_
- if (adr)
+ if (base)
{
- costEx += adr->gtCostEx;
- costSz += adr->gtCostSz;
- if ((adr->gtOper == GT_LCL_VAR) &&
+ costEx += base->gtCostEx;
+ costSz += base->gtCostSz;
+ if ((base->gtOper == GT_LCL_VAR) &&
((idx==NULL) || (cns==0)))
{
costSz -= 1;
@@ -3691,10 +3757,10 @@ COMMON_CNS:
}
}
#elif defined _TARGET_ARM64_
- if (adr)
+ if (base)
{
- costEx += adr->gtCostEx;
- costSz += adr->gtCostSz;
+ costEx += base->gtCostEx;
+ costSz += base->gtCostSz;
}
if (idx)
@@ -3715,62 +3781,62 @@ COMMON_CNS:
#error "Unknown _TARGET_"
#endif
- assert(op1->gtOper == GT_ADD);
- assert(!op1->gtOverflow());
+ assert(addr->gtOper == GT_ADD);
+ assert(!addr->gtOverflow());
assert(op2 == NULL);
assert(mul != 1);
// If we have an addressing mode, we have one of:
- // [adr + cns]
- // [ idx * mul ] // mul >= 2, else we would use adr instead of idx
- // [ idx * mul + cns] // mul >= 2, else we would use adr instead of idx
- // [adr + idx * mul ] // mul can be 0, 2, 4, or 8
- // [adr + idx * mul + cns] // mul can be 0, 2, 4, or 8
+ // [base + cns]
+ // [ idx * mul ] // mul >= 2, else we would use base instead of idx
+ // [ idx * mul + cns] // mul >= 2, else we would use base instead of idx
+ // [base + idx * mul ] // mul can be 0, 2, 4, or 8
+ // [base + idx * mul + cns] // mul can be 0, 2, 4, or 8
// Note that mul == 0 is semantically equivalent to mul == 1.
// Note that cns can be zero.
#if SCALED_ADDR_MODES
- assert((adr != NULL) || (idx != NULL && mul >= 2));
+ assert((base != NULL) || (idx != NULL && mul >= 2));
#else
- assert(adr != NULL);
+ assert(base != NULL);
#endif
- INDEBUG(GenTreePtr op1Save = op1);
+ INDEBUG(GenTreePtr op1Save = addr);
- /* Walk op1 looking for non-overflow GT_ADDs */
- gtWalkOp(&op1, &op2, adr, false);
+ /* Walk addr looking for non-overflow GT_ADDs */
+ gtWalkOp(&addr, &op2, base, false);
- // op1 and op2 are now children of the root GT_ADD of the addressing mode
- assert(op1 != op1Save);
+ // addr and op2 are now children of the root GT_ADD of the addressing mode
+ assert(addr != op1Save);
assert(op2 != NULL);
- /* Walk op1 looking for non-overflow GT_ADDs of constants */
- gtWalkOp(&op1, &op2, NULL, true);
+ /* Walk addr looking for non-overflow GT_ADDs of constants */
+ gtWalkOp(&addr, &op2, NULL, true);
// TODO-Cleanup: It seems very strange that we might walk down op2 now, even though the prior
// call to gtWalkOp() may have altered op2.
/* Walk op2 looking for non-overflow GT_ADDs of constants */
- gtWalkOp(&op2, &op1, NULL, true);
+ gtWalkOp(&op2, &addr, NULL, true);
// OK we are done walking the tree
- // Now assert that op1 and op2 correspond with adr and idx
+ // Now assert that addr and op2 correspond with base and idx
// in one of the several acceptable ways.
- // Note that sometimes op1/op2 is equal to idx/adr
- // and other times op1/op2 is a GT_COMMA node with
- // an effective value that is idx/adr
+ // Note that sometimes addr/op2 is equal to idx/base
+ // and other times addr/op2 is a GT_COMMA node with
+ // an effective value that is idx/base
if (mul > 1)
{
- if ((op1 != adr) && (op1->gtOper == GT_LSH))
+ if ((addr != base) && (addr->gtOper == GT_LSH))
{
- op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
- if (op1->gtOp.gtOp1->gtOper == GT_MUL)
+ addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ if (addr->gtOp.gtOp1->gtOper == GT_MUL)
{
- op1->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ addr->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
- assert((adr == NULL) || (op2 == adr) || (op2->gtEffectiveVal() == adr->gtEffectiveVal()) ||
- (gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(adr)));
+ assert((base == NULL) || (op2 == base) || (op2->gtEffectiveVal() == base->gtEffectiveVal()) ||
+ (gtWalkOpEffectiveVal(op2) == gtWalkOpEffectiveVal(base)));
}
else
{
@@ -3785,7 +3851,7 @@ COMMON_CNS:
op2op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
op2op1 = op2op1->gtOp.gtOp1;
}
- assert(op1->gtEffectiveVal() == adr);
+ assert(addr->gtEffectiveVal() == base);
assert(op2op1 == idx);
}
}
@@ -3793,24 +3859,24 @@ COMMON_CNS:
{
assert(mul == 0);
- if ((op1 == idx) || (op1->gtEffectiveVal() == idx))
+ if ((addr == idx) || (addr->gtEffectiveVal() == idx))
{
if (idx != NULL)
{
- if ((op1->gtOper == GT_MUL) || (op1->gtOper == GT_LSH))
+ if ((addr->gtOper == GT_MUL) || (addr->gtOper == GT_LSH))
{
- if ((op1->gtOp.gtOp1->gtOper == GT_NOP) ||
- (op1->gtOp.gtOp1->gtOper == GT_MUL && op1->gtOp.gtOp1->gtOp.gtOp1->gtOper == GT_NOP))
+ if ((addr->gtOp.gtOp1->gtOper == GT_NOP) ||
+ (addr->gtOp.gtOp1->gtOper == GT_MUL && addr->gtOp.gtOp1->gtOp.gtOp1->gtOper == GT_NOP))
{
- op1->gtFlags |= GTF_ADDRMODE_NO_CSE;
- if (op1->gtOp.gtOp1->gtOper == GT_MUL)
- op1->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ addr->gtFlags |= GTF_ADDRMODE_NO_CSE;
+ if (addr->gtOp.gtOp1->gtOper == GT_MUL)
+ addr->gtOp.gtOp1->gtFlags |= GTF_ADDRMODE_NO_CSE;
}
}
}
- assert((op2 == adr) || (op2->gtEffectiveVal() == adr));
+ assert((op2 == base) || (op2->gtEffectiveVal() == base));
}
- else if ((op1 == adr) || (op1->gtEffectiveVal() == adr))
+ else if ((addr == base) || (addr->gtEffectiveVal() == base))
{
if (idx != NULL)
{
@@ -3831,7 +3897,7 @@ COMMON_CNS:
}
else
{
- // op1 isn't adr or idx. Is this possible? Or should there be an assert?
+ // addr isn't base or idx. Is this possible? Or should there be an assert?
}
}
goto DONE;
@@ -4302,60 +4368,7 @@ COMMON_CNS:
if (tryToSwap)
{
- /* Relative of order of global / side effects can't be swapped */
-
- bool canSwap = true;
-
- if (optValnumCSE_phase)
- {
- canSwap = optCSE_canSwap(tree);
- }
-
- /* We cannot swap in the presence of special side effects such as GT_CATCH_ARG */
-
- if (canSwap &&
- (opA->gtFlags & GTF_ORDER_SIDEEFF))
- {
- canSwap = false;
- }
-
- /* When strict side effect order is disabled we allow
- * GTF_REVERSE_OPS to be set when one or both sides contains
- * a GTF_CALL or GTF_EXCEPT.
- * Currently only the C and C++ languages
- * allow non strict side effect order
- */
- unsigned strictEffects = GTF_GLOB_EFFECT;
-
- if (canSwap &&
- (opA->gtFlags & strictEffects))
- {
- /* op1 has side efects, that can't be reordered.
- * Check for some special cases where we still
- * may be able to swap
- */
-
- if (opB->gtFlags & strictEffects)
- {
- /* op2 has also has non reorderable side effects - can't swap */
- canSwap = false;
- }
- else
- {
- /* No side effects in op2 - we can swap iff
- * op1 has no way of modifying op2,
- * i.e. through byref assignments or calls
- * unless op2 is a constant
- */
-
- if (opA->gtFlags & strictEffects & GTF_PERSISTENT_SIDE_EFFECTS)
- {
- /* We have to be conservative - can swap iff op2 is constant */
- if (!opB->OperIsConst())
- canSwap = false;
- }
- }
- }
+ bool canSwap = gtCanSwapOrder(opA, opB);
if (canSwap)
{
@@ -12191,7 +12204,7 @@ void Compiler::gtExtractSideEffList(GenTreePtr expr, GenTreePtr *
// Special case - GT_ADDR of GT_IND nodes of TYP_STRUCT
// have to be kept together
- if (oper == GT_ADDR && op1->gtOper == GT_IND && op1->gtType == TYP_STRUCT)
+ if (oper == GT_ADDR && op1->OperIsIndir() && op1->gtType == TYP_STRUCT)
{
*pList = gtBuildCommaList(*pList, expr);
@@ -13306,13 +13319,22 @@ GenTree::IsLclVarUpdateTree(GenTree** pOtherTree, genTreeOps *pOper)
// until after the LSRA phase has allocated physical registers to the treenodes.
bool GenTree::isContained() const
{
+ if (isContainedSpillTemp())
+ {
+ return true;
+ }
+
if (gtHasReg())
+ {
return false;
+ }
// these actually produce a register (the flags reg, we just don't model it)
// and are a separate instruction from the branch that consumes the result
if (OperKind() & GTK_RELOP)
+ {
return false;
+ }
// TODO-Cleanup : this is not clean, would be nice to have some way of marking this.
switch (OperGet())
@@ -13388,7 +13410,7 @@ bool GenTree::isContainedIndir() const
bool GenTree::isIndirAddrMode()
{
- return isIndir() && gtOp.gtOp1->OperIsAddrMode() && gtOp.gtOp1->isContained();
+ return isIndir() && AsIndir()->Addr()->OperIsAddrMode() && AsIndir()->Addr()->isContained();
}
bool GenTree::isIndir() const
@@ -14041,7 +14063,7 @@ void GenTree::ParseArrayAddressWork(Compiler* comp, ssize_t inputMul, GenTreePtr
bool GenTree::ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqNode** pFldSeq)
{
- if (OperGet() == GT_IND)
+ if (OperIsIndir())
{
if (gtFlags & GTF_IND_ARR_INDEX)
{
@@ -14051,7 +14073,7 @@ bool GenTree::ParseArrayElemForm(Compiler* comp, ArrayInfo* arrayInfo, FieldSeqN
}
// Otherwise...
- GenTreePtr addr = gtOp.gtOp1;
+ GenTreePtr addr = AsIndir()->Addr();
return addr->ParseArrayElemAddrForm(comp, arrayInfo, pFldSeq);
}
else
diff --git a/src/jit/gentree.h b/src/jit/gentree.h
index 716d38553e..48a04b6d9e 100644
--- a/src/jit/gentree.h
+++ b/src/jit/gentree.h
@@ -495,7 +495,9 @@ public:
bool isContainedLclField() const { return isContained() && isLclField(); }
- bool isContainedLclVar() const { return isContained() && (OperGet() == GT_LCL_VAR); }
+ bool isContainedLclVar() const { return isContained() && (OperGet() == GT_LCL_VAR); }
+
+ bool isContainedSpillTemp() const;
// Indicates whether it is a memory op.
// Right now it includes Indir and LclField ops.
@@ -503,7 +505,7 @@ public:
bool isContainedMemoryOp() const
{
- return (isContained() && isMemoryOp()) || isContainedLclVar();
+ return (isContained() && isMemoryOp()) || isContainedLclVar() || isContainedSpillTemp();
}
regNumber GetRegNum() const
@@ -665,11 +667,13 @@ public:
#define GTF_REVERSE_OPS 0x00000020 // operand op2 should be evaluated before op1 (normally, op1 is evaluated first and op2 is evaluated second)
#define GTF_REG_VAL 0x00000040 // operand is sitting in a register (or part of a TYP_LONG operand is sitting in a register)
- #define GTF_SPILLED 0x00000080 // the value has been spilled
- #define GTF_SPILLED_OPER 0x00000100 // op1 has been spilled
+ #define GTF_SPILLED 0x00000080 // the value has been spilled
#ifdef LEGACY_BACKEND
- #define GTF_SPILLED_OP2 0x00000200 // op2 has been spilled
+ #define GTF_SPILLED_OPER 0x00000100 // op1 has been spilled
+ #define GTF_SPILLED_OP2 0x00000200 // op2 has been spilled
+#else
+ #define GTF_NOREG_AT_USE 0x00000100 // tree node is in memory at the point of use
#endif // LEGACY_BACKEND
#define GTF_REDINDEX_CHECK 0x00000100 // Used for redundant range checks. Disjoint from GTF_SPILLED_OPER
@@ -1200,7 +1204,7 @@ public:
static
bool OperIsIndir(genTreeOps gtOper)
{
- return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK;
+ return gtOper == GT_IND || gtOper == GT_STOREIND || gtOper == GT_NULLCHECK || gtOper == GT_OBJ;
}
bool OperIsIndir() const
@@ -3316,27 +3320,6 @@ protected:
#endif // DEBUGGABLE_GENTREE
};
-// gtObj -- 'object' (GT_OBJ). */
-
-struct GenTreeObj: public GenTreeUnOp
-{
- // The address of the block.
- GenTreePtr& Addr() { return gtOp1; }
-
- CORINFO_CLASS_HANDLE gtClass; // the class of the object
-
- GenTreeObj(var_types type, GenTreePtr addr, CORINFO_CLASS_HANDLE cls) :
- GenTreeUnOp(GT_OBJ, type, addr),
- gtClass(cls)
- {
- gtFlags |= GTF_GLOB_REF; // An Obj is always a global reference.
- }
-
-#if DEBUGGABLE_GENTREE
- GenTreeObj() : GenTreeUnOp() {}
-#endif
-};
-
// Represents a CpObj MSIL Node.
struct GenTreeCpObj : public GenTreeBlkOp
{
@@ -3545,6 +3528,25 @@ protected:
#endif
};
+// gtObj -- 'object' (GT_OBJ).
+
+struct GenTreeObj: public GenTreeIndir
+{
+ CORINFO_CLASS_HANDLE gtClass; // the class of the object
+
+ GenTreeObj(var_types type, GenTreePtr addr, CORINFO_CLASS_HANDLE cls) :
+ GenTreeIndir(GT_OBJ, type, addr, nullptr),
+ gtClass(cls)
+ {
+ // By default, an OBJ is assumed to be a global reference.
+ gtFlags |= GTF_GLOB_REF;
+ }
+
+#if DEBUGGABLE_GENTREE
+ GenTreeObj() : GenTreeIndir() {}
+#endif
+};
+
// Read-modify-write status of a RMW memory op rooted at a storeInd
enum RMWStatus {
STOREIND_RMW_STATUS_UNKNOWN, // RMW status of storeInd unknown
@@ -4497,6 +4499,19 @@ GenTreeBlkOp::HasGCPtr()
return false;
}
+inline bool GenTree::isContainedSpillTemp() const
+{
+#if !defined(LEGACY_BACKEND)
+ // If spilled and no reg at use, then it is treated as contained.
+ if (((gtFlags & GTF_SPILLED) != 0) &&
+ ((gtFlags & GTF_NOREG_AT_USE) != 0))
+ {
+ return true;
+ }
+#endif //!LEGACY_BACKEND
+
+ return false;
+}
/*****************************************************************************/
diff --git a/src/jit/gtstructs.h b/src/jit/gtstructs.h
index 2f0b3a3936..06c5b9816f 100644
--- a/src/jit/gtstructs.h
+++ b/src/jit/gtstructs.h
@@ -90,7 +90,7 @@ GTSTRUCT_1(AddrMode , GT_LEA)
GTSTRUCT_1(Qmark , GT_QMARK)
GTSTRUCT_1(PhiArg , GT_PHI_ARG)
GTSTRUCT_1(StoreInd , GT_STOREIND)
-GTSTRUCT_2(Indir , GT_STOREIND, GT_IND)
+GTSTRUCT_N(Indir , GT_STOREIND, GT_IND, GT_NULLCHECK, GT_OBJ)
GTSTRUCT_1(PutArgStk , GT_PUTARG_STK)
GTSTRUCT_1(PhysReg , GT_PHYSREG)
GTSTRUCT_3(BlkOp , GT_COPYBLK, GT_INITBLK, GT_COPYOBJ)
diff --git a/src/jit/jitgcinfo.h b/src/jit/jitgcinfo.h
index 480f88491a..e5092cfaa1 100644
--- a/src/jit/jitgcinfo.h
+++ b/src/jit/jitgcinfo.h
@@ -8,11 +8,11 @@
#ifndef _JITGCINFO_H_
#define _JITGCINFO_H_
-#include "gcinfo.h"
+
+#include "gcinfotypes.h"
#ifndef JIT32_GCENCODER
#include "gcinfoencoder.h"
-#include "gcinfotypes.h"
#endif
/*****************************************************************************/
diff --git a/src/jit/lower.h b/src/jit/lower.h
index 4baeb7e1fc..9f62978a62 100644
--- a/src/jit/lower.h
+++ b/src/jit/lower.h
@@ -143,8 +143,62 @@ private:
void TreeNodeInfoInit(GenTreePtr* tree, GenTree* parent);
#if defined(_TARGET_XARCH_)
void TreeNodeInfoInitSimple(GenTree* tree);
- void SetRegOptionalForBinOp(GenTree* tree);
- void TryToSetRegOptional(GenTree* operand);
+
+ //----------------------------------------------------------------------
+    // SetRegOptional - sets a bit to indicate to LSRA that the register
+    // for a given tree node is optional for codegen purposes.  If no
+ // register is allocated to such a tree node, its parent node treats
+ // it as a contained memory operand during codegen.
+ //
+ // Arguments:
+ // tree - GenTree node
+ //
+ // Returns
+ // None
+ void SetRegOptional(GenTree* tree)
+ {
+ tree->gtLsraInfo.regOptional = true;
+ }
+
+ GenTree* PreferredRegOptionalOperand(GenTree* tree);
+
+ // ------------------------------------------------------------------
+ // SetRegOptionalBinOp - Indicates which of the operands of a bin-op
+ // register requirement is optional. Xarch instruction set allows
+ // either of op1 or op2 of binary operation (e.g. add, mul etc) to be
+    // a memory operand.  This routine tells the register allocator which
+    // of the node's operands optionally require a register.  Lsra might not
+    // allocate a register to the RefTypeUse positions of such operands if
+    // that is beneficial. In such a case codegen will treat them as memory
+    // operands.
+ //
+ // Arguments:
+    //    tree  -  GenTree of a binary operation.
+ //
+ // Returns
+ // None.
+ //
+    // Note: On xarch at most one of the operands will be marked as
+ // reg optional, even when both operands could be considered register
+ // optional.
+ void SetRegOptionalForBinOp(GenTree* tree)
+ {
+ assert(GenTree::OperIsBinary(tree->OperGet()));
+
+ GenTree* op1 = tree->gtGetOp1();
+ GenTree* op2 = tree->gtGetOp2();
+
+ if (tree->OperIsCommutative() &&
+ tree->TypeGet() == op1->TypeGet())
+ {
+ GenTree* preferredOp = PreferredRegOptionalOperand(tree);
+ SetRegOptional(preferredOp);
+ }
+ else if (tree->TypeGet() == op2->TypeGet())
+ {
+ SetRegOptional(op2);
+ }
+ }
#endif // defined(_TARGET_XARCH_)
void TreeNodeInfoInitReturn(GenTree* tree);
void TreeNodeInfoInitShiftRotate(GenTree* tree);
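
The two guards in SetRegOptionalForBinOp come down to type checks: only an operand whose type matches the node's type can be read directly from memory by the instruction. A standalone model of just that selection (MiniType and the always-op1 preference are simplifying assumptions):

enum MiniType { INT32, INT64 };

struct MiniOp
{
    MiniType type;
    bool     regOptional = false;
};

void setRegOptionalForBinOpSketch(MiniType nodeType, bool commutative,
                                  MiniOp& op1, MiniOp& op2)
{
    if (commutative && nodeType == op1.type)
    {
        // Either operand could come from memory; a heuristic picks one
        // (modeled here by always choosing op1).
        op1.regOptional = true;
    }
    else if (nodeType == op2.type)
    {
        // Non-commutative, or op1's type mismatched: on xarch only op2
        // may then be the memory operand.
        op2.regOptional = true;
    }
}
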
diff --git a/src/jit/lowerxarch.cpp b/src/jit/lowerxarch.cpp
index d41239cc40..8353f2c81c 100644
--- a/src/jit/lowerxarch.cpp
+++ b/src/jit/lowerxarch.cpp
@@ -561,7 +561,7 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
info->srcCount = 2;
info->dstCount = 0;
- GenTreePtr other = nullptr;
+ GenTreePtr other;
if (CheckImmedAndMakeContained(tree, node->gtIndex))
{
other = node->gtArrLen;
@@ -579,17 +579,17 @@ void Lowering::TreeNodeInfoInit(GenTree* stmt)
other = node->gtArrLen;
}
- if (other->isMemoryOp())
+ if (node->gtIndex->TypeGet() == node->gtArrLen->TypeGet())
{
- if (node->gtIndex->TypeGet() == node->gtArrLen->TypeGet())
+ if (other->isMemoryOp())
{
MakeSrcContained(tree, other);
}
- }
- else
- {
- // since 'other' operand is not contained, we can mark it as reg optional
- TryToSetRegOptional(other);
+ else
+ {
+ // We can mark 'other' as reg optional, since it is not contained.
+ SetRegOptional(other);
+ }
}
}
break;
@@ -2087,7 +2087,8 @@ Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
else
{
// If there are no containable operands, we can make an operand reg optional.
- SetRegOptionalForBinOp(tree);
+ // SSE2 allows only op2 to be a memory-op.
+ SetRegOptional(op2);
}
return;
@@ -2128,7 +2129,8 @@ Lowering::TreeNodeInfoInitModDiv(GenTree* tree)
op2->gtLsraInfo.setSrcCandidates(l, l->allRegs(TYP_INT) & ~(RBM_RAX | RBM_RDX));
// If there are no containable operands, we can make an operand reg optional.
- SetRegOptionalForBinOp(tree);
+ // Div instruction allows only op2 to be a memory op.
+ SetRegOptional(op2);
}
}
@@ -2167,7 +2169,7 @@ Lowering::TreeNodeInfoInitIntrinsic(GenTree* tree)
{
// Mark the operand as reg optional since codegen can still
// generate code if op1 is on stack.
- TryToSetRegOptional(op1);
+ SetRegOptional(op1);
}
break;
@@ -2503,7 +2505,7 @@ Lowering::TreeNodeInfoInitCast(GenTree* tree)
{
// Mark castOp as reg optional to indicate codegen
// can still generate code if it is on stack.
- TryToSetRegOptional(castOp);
+ SetRegOptional(castOp);
}
}
}
@@ -2861,18 +2863,16 @@ void Lowering::LowerCmp(GenTreePtr tree)
{
MakeSrcContained(tree, otherOp);
}
- else if (otherOp->isMemoryOp())
+ else if (otherOp->isMemoryOp() &&
+ ((otherOp == op2) || IsSafeToContainMem(tree, otherOp)))
{
- if ((otherOp == op2) || IsSafeToContainMem(tree, otherOp))
- {
- MakeSrcContained(tree, otherOp);
- }
+ MakeSrcContained(tree, otherOp);
}
else
{
- // Mark otherOp as reg optional to indicate codgen can still generate
- // code even if otherOp is on stack.
- TryToSetRegOptional(otherOp);
+ // SSE2 allows only otherOp to be a memory-op. Since otherOp is not
+ // contained, we can mark it reg-optional.
+ SetRegOptional(otherOp);
}
return;
@@ -2950,6 +2950,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
}
}
}
+
if (op1CanBeContained)
{
if (op1->isMemoryOp())
@@ -2957,7 +2958,9 @@ void Lowering::LowerCmp(GenTreePtr tree)
MakeSrcContained(tree, op1);
}
else
- {
+ {
+ bool op1IsMadeContained = false;
+
// When op1 is a GT_AND we can often generate a single "test" instruction
// instead of two instructions (an "and" instruction followed by a "cmp"/"test")
//
@@ -3083,6 +3086,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
}
// Mark the 'op1' (the GT_AND) operand as contained
MakeSrcContained(tree, op1);
+ op1IsMadeContained = true;
// During Codegen we will now generate "test andOp1, andOp2CnsVal"
}
@@ -3127,8 +3131,8 @@ void Lowering::LowerCmp(GenTreePtr tree)
assert(!castOp1->gtOverflowEx()); // Must not be an overflow checking operation
GenTreePtr removeTreeNode = op1;
- GenTreePtr removeTreeNodeChild = castOp1;
tree->gtOp.gtOp1 = castOp1;
+ op1 = castOp1;
castOp1->gtType = TYP_UBYTE;
// trim down the value if castOp1 is an int constant since its type changed to UBYTE.
@@ -3150,6 +3154,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
if (castOp1->isMemoryOp())
{
MakeSrcContained(tree, op1);
+ op1IsMadeContained = true;
}
}
}
@@ -3163,6 +3168,12 @@ void Lowering::LowerCmp(GenTreePtr tree)
#endif
}
}
+
+ // If not made contained, op1 can be marked as reg-optional.
+ if (!op1IsMadeContained)
+ {
+ SetRegOptional(op1);
+ }
}
}
}
@@ -3181,12 +3192,7 @@ void Lowering::LowerCmp(GenTreePtr tree)
// One of op1 or op2 could be marked as reg optional
// to indicate that codgen can still generate code
// if one of them is on stack.
- TryToSetRegOptional(op2);
-
- if (!op2->IsRegOptional())
- {
- TryToSetRegOptional(op1);
- }
+ SetRegOptional(PreferredRegOptionalOperand(tree));
}
if (varTypeIsSmall(op1Type) && varTypeIsUnsigned(op1Type))
@@ -3717,6 +3723,10 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
bool requiresOverflowCheck = tree->gtOverflowEx();
bool useLeaEncoding = false;
GenTreePtr memOp = nullptr;
+
+ bool hasImpliedFirstOperand = false;
+ GenTreeIntConCommon* imm = nullptr;
+ GenTreePtr other = nullptr;
// There are three forms of x86 multiply:
// one-op form: RDX:RAX = RAX * r/m
@@ -3740,26 +3750,25 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
// In LSRA we set the kill set for this operation to RBM_RAX|RBM_RDX
//
info->setDstCandidates(m_lsra,RBM_RAX);
+ hasImpliedFirstOperand = true;
}
else if (tree->gtOper == GT_MULHI)
{
// have to use the encoding:RDX:RAX = RAX * rm
info->setDstCandidates(m_lsra, RBM_RAX);
+ hasImpliedFirstOperand = true;
}
else if (IsContainableImmed(tree, op2) || IsContainableImmed(tree, op1))
{
- GenTreeIntConCommon* imm;
- GenTreePtr other;
-
if (IsContainableImmed(tree, op2))
{
imm = op2->AsIntConCommon();
- other = op1;
+ other = op1;
}
else
{
imm = op1->AsIntConCommon();
- other = op2;
+ other = op2;
}
// CQ: We want to rewrite this into a LEA
@@ -3770,11 +3779,12 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
}
MakeSrcContained(tree, imm); // The imm is always contained
- if (other->isIndir())
+ if (other->isMemoryOp())
{
memOp = other; // memOp may be contained below
}
}
+
// We allow one operand to be a contained memory operand.
// The memory op type must match with the 'tree' type.
// This is because during codegen we use 'tree' type to derive EmitTypeSize.
@@ -3790,17 +3800,28 @@ void Lowering::SetMulOpCounts(GenTreePtr tree)
//
if (!useLeaEncoding)
{
- if (memOp != nullptr)
+ if ((memOp != nullptr) &&
+ (memOp->TypeGet() == tree->TypeGet()) &&
+ IsSafeToContainMem(tree, memOp))
{
- if ((memOp->TypeGet() == tree->TypeGet()) &&
- IsSafeToContainMem(tree, memOp))
- {
- MakeSrcContained(tree, memOp);
- }
+ MakeSrcContained(tree, memOp);
+ }
+ else if (imm != nullptr)
+ {
+ // Has a contained immediate operand.
+ // Only 'other' operand can be marked as reg optional.
+ assert(other != nullptr);
+ SetRegOptional(other);
+ }
+ else if (hasImpliedFirstOperand)
+ {
+            // Only op2 can be marked as reg optional.
+ SetRegOptional(op2);
}
else
{
- // If there are no containable operands, we can make an operand reg optional.
+            // If there are no containable operands, we can mark either op1 or op2
+            // as reg optional.
SetRegOptionalForBinOp(tree);
}
}
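
The multiply lowering above walks a strict preference ladder. Restated as a standalone decision function (the boolean inputs are stand-ins for the real containment checks):

enum class MulRegOptional { None, Other, Op2, Either };

MulRegOptional chooseMulRegOptional(bool madeMemOpContained,
                                    bool hasContainedImmed,
                                    bool hasImpliedFirstOperand)
{
    if (madeMemOpContained)     return MulRegOptional::None;   // memory operand chosen
    if (hasContainedImmed)      return MulRegOptional::Other;  // imm contained: mark 'other'
    if (hasImpliedFirstOperand) return MulRegOptional::Op2;    // RAX implied: only op2
    return MulRegOptional::Either;                             // either op1 or op2
}
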
@@ -3869,67 +3890,128 @@ bool Lowering:: IsContainableImmed(GenTree* parentNode, GenTree* childNode)
return true;
}
-//----------------------------------------------------------------------
-// TryToSetRegOptional - sets a bit to indicate to LSRA that register
-// for a given tree node is optional for codegen purpose. If no
-// register is allocated to such a tree node, its parent node treats
-// it as a contained memory operand during codegen.
-//
-// Arguments:
-// tree - GenTree node
+//-----------------------------------------------------------------------
+// PreferredRegOptionalOperand: returns whichever operand of a given
+// binary oper is preferred for marking as reg optional.
//
-// Returns
-// None
-//
-// Note: Right now a tree node is marked as reg optional only
-// if is it a GT_LCL_VAR. This routine needs to be modified if
-// in future if lower/codegen needs to support other tree node
-// types.
-void Lowering::TryToSetRegOptional(GenTree* tree)
-{
- if (tree->OperGet() == GT_LCL_VAR)
- {
- tree->gtLsraInfo.regOptional = true;
- }
-}
-
-// ------------------------------------------------------------------
-// SetRegOptionalBinOp - Indicates which of the operands of a bin-op
-// register requirement is optional. Xarch instruction set allows
-// either of op1 or op2 of binary operation (e.g. add, mul etc) to be
-// a memory operand. This routine provides info to register allocator
-// which of its operands optionally require a register. Lsra might not
-// allocate a register to RefTypeUse positions of such operands if it
-// is beneficial. In such a case codegen will treat them as memory
-// operands.
+// Since only one of op1 or op2 can be a memory operand on xarch, only
+// one of them has to be marked as reg optional.  Since Lower doesn't
+// know a priori which of op1 or op2 is not likely to get a register, it
+// has to make a guess. This routine encapsulates heuristics that
+// guess whether it is likely to be beneficial to mark op1 or op2 as
+// reg optional.
+//
//
// Arguments:
-// tree - Gentree of a bininary operation.
+// tree - a binary-op tree node that is either commutative
+// or a compare oper.
//
-// Returns
-// None.
-//
-// Note: On xarch at most only one of the operands will be marked as
-// reg optional, even when both operands could be considered register
-// optional.
-void Lowering::SetRegOptionalForBinOp(GenTree* tree)
+// Returns:
+// Returns op1 or op2 of tree node that is preferred for
+// marking as reg optional.
+//
+// Note: if the tree oper is neither commutative nor a compare oper
+// then only op2 can be reg optional on xarch and hence there is no need to
+// call this routine.
+GenTree* Lowering::PreferredRegOptionalOperand(GenTree* tree)
{
assert(GenTree::OperIsBinary(tree->OperGet()));
+ assert(tree->OperIsCommutative() || tree->OperIsCompare());
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2();
+ GenTree* preferredOp = nullptr;
- if (tree->TypeGet() == op2->TypeGet())
+ // This routine uses the following heuristics:
+ //
+ // a) If both are tracked locals, marking the one with lower weighted
+ // ref count as reg-optional would likely be beneficial as it has
+ // higher probability of not getting a register.
+ //
+ // b) op1 = tracked local and op2 = untracked local: LSRA creates two
+ // ref positions for op2: a def and use position. op2's def position
+ // requires a reg and it is allocated a reg by spilling another
+    // it is beneficial to mark op1 as reg optional: op2's def position
+ // it is beneficial to mark op1 as reg optional.
+ //
+ // TODO: It is not always mandatory for a def position of an untracked
+ // local to be allocated a register if it is on rhs of an assignment
+ // and its use position is reg-optional and has not been assigned a
+    // register. Reg-optional def positions are not yet supported.
+ //
+ // c) op1 = untracked local and op2 = tracked local: marking op1 as
+ // reg optional is beneficial, since its use position is less likely
+ // to get a register.
+ //
+ // d) If both are untracked locals (i.e. treated like tree temps by
+ // LSRA): though either of them could be marked as reg optional,
+ // marking op1 as reg optional is likely to be beneficial because
+ // while allocating op2's def position, there is a possibility of
+ // spilling op1's def and in which case op1 is treated as contained
+ // memory operand rather than requiring to reload.
+ //
+ // e) If only one of them is a local var, prefer to mark it as
+ // reg-optional. This is heuristic is based on the results
+ // obtained against CQ perf benchmarks.
+ //
+ // f) If neither of them are local vars (i.e. tree temps), prefer to
+ // mark op1 as reg optional for the same reason as mentioned in (d) above.
+ if (op1->OperGet() == GT_LCL_VAR &&
+ op2->OperGet() == GT_LCL_VAR)
{
- TryToSetRegOptional(op2);
- }
+ LclVarDsc* v1 = comp->lvaTable + op1->AsLclVarCommon()->GetLclNum();
+ LclVarDsc* v2 = comp->lvaTable + op2->AsLclVarCommon()->GetLclNum();
- if (!op2->IsRegOptional() &&
- tree->OperIsCommutative() &&
- tree->TypeGet() == op1->TypeGet())
+ if (v1->lvTracked && v2->lvTracked)
+ {
+        // Both are tracked locals.  The one with the lower weight is less
+        // likely to get a register, so it is beneficial to mark it as
+        // reg optional.
+ if (v1->lvRefCntWtd < v2->lvRefCntWtd)
+ {
+ preferredOp = op1;
+ }
+ else
+ {
+ preferredOp = op2;
+ }
+ }
+ else if (v2->lvTracked)
+ {
+        // v1 is an untracked lcl and its use position is less likely to
+ // get a register.
+ preferredOp = op1;
+ }
+ else if (v1->lvTracked)
+ {
+ // v2 is an untracked lcl and its def position always
+ // needs a reg. Hence it is better to mark v1 as
+ // reg optional.
+ preferredOp = op1;
+ }
+ else
+ {
+        preferredOp = op1;
+ }
+ }
+ else if (op1->OperGet() == GT_LCL_VAR)
+ {
+ preferredOp = op1;
+ }
+ else if (op2->OperGet() == GT_LCL_VAR)
{
- TryToSetRegOptional(op1);
+ preferredOp = op2;
}
+ else
+ {
+        // Neither of the operands is a local; prefer marking the
+        // operand that is evaluated first as reg optional,
+        // since its use position is less likely to get a register.
+ bool reverseOps = ((tree->gtFlags & GTF_REVERSE_OPS) != 0);
+ preferredOp = reverseOps ? op2 : op1;
+ }
+
+ return preferredOp;
}
#endif // _TARGET_XARCH_
diff --git a/src/jit/lsra.cpp b/src/jit/lsra.cpp
index 9be61e4523..266d68ec60 100644
--- a/src/jit/lsra.cpp
+++ b/src/jit/lsra.cpp
@@ -752,6 +752,7 @@ LinearScan::newRefPosition(regNumber reg,
newRP->registerAssignment = mask;
newRP->setMultiRegIdx(0);
+ newRP->setAllocateIfProfitable(0);
associateRefPosWithInterval(newRP);
@@ -835,6 +836,7 @@ LinearScan::newRefPosition(Interval* theInterval,
newRP->registerAssignment = mask;
newRP->setMultiRegIdx(multiRegIdx);
+ newRP->setAllocateIfProfitable(0);
associateRefPosWithInterval(newRP);
@@ -3023,6 +3025,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
pos->isLocalDefUse = true;
bool isLastUse = ((tree->gtFlags & GTF_VAR_DEATH) != 0);
pos->lastUse = isLastUse;
+ pos->setAllocateIfProfitable(tree->IsRegOptional());
DBEXEC(VERBOSE, pos->dump());
return;
}
@@ -3216,6 +3219,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
prefSrcInterval = i;
}
+ bool regOptionalAtUse = useNode->IsRegOptional();
bool isLastUse = true;
if (isCandidateLocalRef(useNode))
{
@@ -3224,7 +3228,7 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
else
{
// For non-localVar uses we record nothing,
- // as nothing needs to be written back to the tree)
+ // as nothing needs to be written back to the tree.
useNode = nullptr;
}
@@ -3260,7 +3264,15 @@ LinearScan::buildRefPositionsForNode(GenTree *tree,
pos->delayRegFree = true;
}
- if (isLastUse) pos->lastUse = true;
+ if (isLastUse)
+ {
+ pos->lastUse = true;
+ }
+
+ if (regOptionalAtUse)
+ {
+ pos->setAllocateIfProfitable(1);
+ }
}
JITDUMP("\n");
@@ -5145,6 +5157,26 @@ LinearScan::allocateBusyReg(Interval* current,
else
{
isBetterLocation = (nextLocation > farthestLocation);
+
+ if (nextLocation > farthestLocation)
+ {
+ isBetterLocation = true;
+ }
+ else if (nextLocation == farthestLocation)
+ {
+ // Both weight and distance are equal.
+ // Prefer that ref position which is marked both reload and
+                    // allocate if profitable.  These ref positions don't
+                    // need to be spilled as they are already in memory and
+ // codegen considers them as contained memory operands.
+ isBetterLocation = (recentAssignedRef != nullptr) &&
+ recentAssignedRef->reload &&
+ recentAssignedRef->AllocateIfProfitable();
+ }
+ else
+ {
+ isBetterLocation = false;
+ }
}
}
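
The new tie-break in miniature (field names assumed): when weighted cost and next-use distance both tie, the cheapest interval to evict is one whose recent use was a reload marked allocate-if-profitable, because its value is already in memory and its consumer can read it from there without a fresh spill.

struct RefPositionSketch
{
    bool reload;
    bool allocateIfProfitable;
};

bool betterVictimOnTie(const RefPositionSketch* recentAssignedRef)
{
    return (recentAssignedRef != nullptr) &&
           recentAssignedRef->reload &&
           recentAssignedRef->allocateIfProfitable;
}
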
@@ -7395,7 +7427,11 @@ LinearScan::recordMaxSpill()
void
LinearScan::updateMaxSpill(RefPosition* refPosition)
{
- if (refPosition->spillAfter || refPosition->reload)
+ RefType refType = refPosition->refType;
+
+ if (refPosition->spillAfter ||
+ refPosition->reload ||
+ (refPosition->AllocateIfProfitable() && refPosition->assignedReg() == REG_NA))
{
Interval* interval = refPosition->getInterval();
if (!interval->isLocalVar)
@@ -7406,8 +7442,8 @@ LinearScan::updateMaxSpill(RefPosition* refPosition)
// 8-byte non-GC items, and 16-byte or 32-byte SIMD vectors.
// LSRA is agnostic to those choices but needs
// to know what they are here.
- RefType refType = refPosition->refType;
var_types typ;
+
#if FEATURE_PARTIAL_SIMD_CALLEE_SAVE
if ((refType == RefTypeUpperVectorSaveDef) || (refType == RefTypeUpperVectorSaveUse))
{
@@ -7419,7 +7455,7 @@ LinearScan::updateMaxSpill(RefPosition* refPosition)
GenTreePtr treeNode = refPosition->treeNode;
if (treeNode == nullptr)
{
- assert(RefTypeIsUse(refPosition->refType));
+ assert(RefTypeIsUse(refType));
treeNode = interval->firstRefPosition->treeNode;
}
assert(treeNode != nullptr);
@@ -7451,6 +7487,17 @@ LinearScan::updateMaxSpill(RefPosition* refPosition)
assert(currentSpill[typ] > 0);
currentSpill[typ]--;
}
+ else if (refPosition->AllocateIfProfitable() &&
+ refPosition->assignedReg() == REG_NA)
+ {
+        // A spill temp is not reloaded into a reg when it is marked as
+        // allocate-if-profitable and is used from its memory location.
+        // To properly account for the max spill of typ, we decrement
+        // the spill count.
+ assert(RefTypeIsUse(refType));
+ assert(currentSpill[typ] > 0);
+ currentSpill[typ]--;
+ }
JITDUMP(" Max spill for %s is %d\n", varTypeName(typ), maxSpill[typ]);
}
}
@@ -7669,18 +7716,20 @@ LinearScan::resolveRegisters()
if (treeNode == nullptr)
{
// This is either a use, a dead def, or a field of a struct
- Interval * interval = currentRefPosition->getInterval();
+ Interval* interval = currentRefPosition->getInterval();
assert(currentRefPosition->refType == RefTypeUse ||
currentRefPosition->registerAssignment == RBM_NONE ||
interval->isStructField);
+
// TODO-Review: Need to handle the case where any of the struct fields
// are reloaded/spilled at this use
assert(!interval->isStructField ||
(currentRefPosition->reload == false &&
currentRefPosition->spillAfter == false));
+
if (interval->isLocalVar && !interval->isStructField)
{
- LclVarDsc * varDsc = interval->getLocalVar(compiler);
+ LclVarDsc* varDsc = interval->getLocalVar(compiler);
// This must be a dead definition. We need to mark the lclVar
// so that it's not considered a candidate for lvRegister, as
@@ -7688,6 +7737,7 @@ LinearScan::resolveRegisters()
assert(currentRefPosition->refType == RefTypeDef);
varDsc->lvRegNum = REG_STK;
}
+
JITDUMP("No tree node to write back to\n");
continue;
}
@@ -7784,7 +7834,25 @@ LinearScan::resolveRegisters()
if (INDEBUG(alwaysInsertReload() ||)
nextRefPosition->assignedReg() != currentRefPosition->assignedReg())
{
- insertCopyOrReload(treeNode, currentRefPosition->getMultiRegIdx(), nextRefPosition);
+ if (nextRefPosition->assignedReg() != REG_NA)
+ {
+ insertCopyOrReload(treeNode, currentRefPosition->getMultiRegIdx(), nextRefPosition);
+ }
+ else
+ {
+ assert(nextRefPosition->AllocateIfProfitable());
+
+                        // In the case of tree temps, if the def is spilled and the use
+                        // didn't get a register, set a flag on the tree node so that it
+                        // is treated as contained at the point of its use.
+ if (currentRefPosition->spillAfter &&
+ currentRefPosition->refType == RefTypeDef &&
+ nextRefPosition->refType == RefTypeUse)
+ {
+ assert(nextRefPosition->treeNode == nullptr);
+ treeNode->gtFlags |= GTF_NOREG_AT_USE;
+ }
+ }
}
}
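
Taken together, the LSRA hunks implement a small protocol for reg-optional tree temps. A standalone sketch with simplified flag values (the real GTF_SPILLED and GTF_NOREG_AT_USE live in gentree.h):

enum : unsigned { F_SPILLED = 0x80, F_NOREG_AT_USE = 0x100 };

struct MiniNode { unsigned flags = 0; };

// resolveRegisters(): the def was spilled and the use got no register.
void markUseWithoutReg(MiniNode& defNode)
{
    defNode.flags |= F_NOREG_AT_USE;
}

// GenTree::isContainedSpillTemp(): such a node reports itself contained,
// so codegen addresses its spill temp directly instead of reloading it.
bool isContainedSpillTempSketch(const MiniNode& n)
{
    return (n.flags & F_SPILLED) && (n.flags & F_NOREG_AT_USE);
}
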
diff --git a/src/jit/lsra.h b/src/jit/lsra.h
index 11af9be3aa..9ce2bd79c7 100644
--- a/src/jit/lsra.h
+++ b/src/jit/lsra.h
@@ -1372,22 +1372,29 @@ public:
) && !AllocateIfProfitable();
}
- // Returns true whether this ref position is to be allocated
- // a reg only if it is profitable. Currently these are the
+ // Indicates whether this ref position is to be allocated
+ // a reg only if profitable. Currently these are the
// ref positions that lower/codegen has indicated as reg
// optional and is considered a contained memory operand if
// no reg is allocated.
+ unsigned allocRegIfProfitable : 1;
+
+ void setAllocateIfProfitable(unsigned val)
+ {
+ allocRegIfProfitable = val;
+ }
+
+    // Returns true if this ref position is to be allocated
+ // a reg only if it is profitable.
bool AllocateIfProfitable()
{
// TODO-CQ: Right now if a ref position is marked as
// copyreg or movereg, then it is not treated as
// 'allocate if profitable'. This is an implementation
// limitation that needs to be addressed.
- return (refType == RefTypeUse) &&
- !copyReg &&
- !moveReg &&
- (treeNode != nullptr) &&
- treeNode->IsRegOptional();
+ return allocRegIfProfitable &&
+ !copyReg &&
+ !moveReg;
}
// Used by RefTypeDef/Use positions of a multi-reg call node.
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index e24e8ae7cd..95d25e30ea 100755
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -4795,29 +4795,30 @@ Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call,
// See if we need to insert a copy at all
// Case 1: don't need a copy if it is the last use of a local. We can't determine that all of the time
// but if there is only one use and no loops, the use must be last.
- if (argx->gtOper == GT_OBJ)
+ GenTreeLclVarCommon* lcl = nullptr;
+ if ((argx->OperGet() == GT_OBJ) && argx->AsObj()->Addr()->OperIsLocal())
+ {
+ lcl = argx->AsObj()->Addr()->AsLclVarCommon();
+ }
+ if (lcl != nullptr)
{
- GenTree* lcl = argx->gtOp.gtOp1;
- if (lcl->OperIsLocal())
+ unsigned varNum = lcl->AsLclVarCommon()->GetLclNum();
+ if (lvaIsImplicitByRefLocal(varNum))
{
- unsigned varNum = lcl->AsLclVarCommon()->GetLclNum();
- if (lvaIsImplicitByRefLocal(varNum))
+ LclVarDsc* varDsc = &lvaTable[varNum];
+ // JIT_TailCall helper has an implicit assumption that all tail call arguments live
+        // on the caller's frame. If an argument lives on the caller's caller's frame, it may get
+ // overwritten if that frame is reused for the tail call. Therefore, we should always copy
+ // struct parameters if they are passed as arguments to a tail call.
+ if (!call->IsTailCallViaHelper() && (varDsc->lvRefCnt == 1) && !fgMightHaveLoop())
{
- LclVarDsc* varDsc = &lvaTable[varNum];
- // JIT_TailCall helper has an implicit assumption that all tail call arguments live
- // on the caller's frame. If an argument lives on the caller caller's frame, it may get
- // overwritten if that frame is reused for the tail call. Therefore, we should always copy
- // struct parameters if they are passed as arguments to a tail call.
- if (!call->IsTailCallViaHelper() && (varDsc->lvRefCnt == 1) && !fgMightHaveLoop())
- {
- varDsc->lvRefCnt = 0;
- args->gtOp.gtOp1 = lcl;
- fgArgTabEntryPtr fp = Compiler::gtArgEntryByNode(call, argx);
- fp->node = lcl;
+ varDsc->lvRefCnt = 0;
+ args->gtOp.gtOp1 = lcl;
+ fgArgTabEntryPtr fp = Compiler::gtArgEntryByNode(call, argx);
+ fp->node = lcl;
- JITDUMP("did not have to make outgoing copy for V%2d", varNum);
- return;
- }
+ JITDUMP("did not have to make outgoing copy for V%2d", varNum);
+ return;
}
}
}
@@ -5912,13 +5913,14 @@ GenTreePtr Compiler::fgMorphField(GenTreePtr tree, MorphAddrContext* ma
lclNum = objRef->gtLclVarCommon.gtLclNum;
}
- // Create the "nullchk" node
- nullchk = gtNewOperNode(GT_NULLCHECK,
- TYP_BYTE, // Make it TYP_BYTE so we only deference it for 1 byte.
- gtNewLclvNode(lclNum, objRefType));
+ // Create the "nullchk" node.
+        // Make it TYP_BYTE so we only dereference it for 1 byte.
+ GenTreePtr lclVar = gtNewLclvNode(lclNum, objRefType);
+ nullchk = new(this, GT_NULLCHECK) GenTreeIndir(GT_NULLCHECK, TYP_BYTE, lclVar, nullptr);
+
nullchk->gtFlags |= GTF_DONT_CSE; // Don't try to create a CSE for these TYP_BYTE indirections
- /* An indirection will cause a GPF if the address is null */
+ // An indirection will cause a GPF if the address is null.
nullchk->gtFlags |= GTF_EXCEPT;
if (asg)
@@ -6423,12 +6425,15 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee)
}
// Get the size of the struct and see if it is register passable.
+ CORINFO_CLASS_HANDLE objClass = nullptr;
+
if (argx->OperGet() == GT_OBJ)
{
+ objClass = argx->AsObj()->gtClass;
#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
unsigned typeSize = 0;
- hasMultiByteArgs = !VarTypeIsMultiByteAndCanEnreg(argx->TypeGet(), argx->gtObj.gtClass, &typeSize, false);
+ hasMultiByteArgs = !VarTypeIsMultiByteAndCanEnreg(argx->TypeGet(), objClass, &typeSize, false);
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING) || defined(_TARGET_ARM64_)
// On System V/arm64 the args could be a 2 eightbyte struct that is passed in two registers.
@@ -16166,7 +16171,7 @@ void Compiler::fgMarkImplicitByRefArgs()
* Morph irregular parameters
* for x64 and ARM64 this means turning them into byrefs, adding extra indirs.
*/
-bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr tree, fgWalkData* fgWalkPre)
+bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr *pTree, fgWalkData* fgWalkPre)
{
#if !defined(_TARGET_AMD64_) && !defined(_TARGET_ARM64_)
@@ -16174,6 +16179,7 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr tree, fgWalkData* fgWalkPre)
#else // _TARGET_AMD64_ || _TARGET_ARM64_
+ GenTree* tree = *pTree;
assert((tree->gtOper == GT_LCL_VAR) ||
((tree->gtOper == GT_ADDR) && (tree->gtOp.gtOp1->gtOper == GT_LCL_VAR)));
@@ -16201,6 +16207,9 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr tree, fgWalkData* fgWalkPre)
// We are overloading the lvRefCnt field here because real ref counts have not been set.
lclVarDsc->lvRefCnt++;
+ // This is no longer a def of the lclVar, even if it WAS a def of the struct.
+ lclVarTree->gtFlags &= ~(GTF_LIVENESS_MASK);
+
if (isAddr)
{
// change &X into just plain X
@@ -16245,6 +16254,7 @@ bool Compiler::fgMorphImplicitByRefArgs(GenTreePtr tree, fgWalkData* fgWalkPre)
#endif // DEBUG
}
+ *pTree = tree;
return true;
#endif // _TARGET_AMD64_ || _TARGET_ARM64_
@@ -16446,7 +16456,7 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
// If we have ADDR(lcl), and "lcl" is an implicit byref parameter, fgMorphImplicitByRefArgs will
// convert to just "lcl". This is never an address-context use, since the local is already a
// byref after this transformation.
- if (tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR && comp->fgMorphImplicitByRefArgs(tree, fgWalkPre))
+ if (tree->gtOp.gtOp1->OperGet() == GT_LCL_VAR && comp->fgMorphImplicitByRefArgs(pTree, fgWalkPre))
{
// Push something to keep the PostCB, which will pop it, happy.
axcStack->Push(AXC_None);
@@ -16547,7 +16557,7 @@ Compiler::fgWalkResult Compiler::fgMarkAddrTakenLocalsPreCB(GenTreePtr* pTr
case GT_LCL_VAR:
// On some architectures, some arguments are passed implicitly by reference.
// Modify the trees to reflect that, if this local is one of those.
- if (comp->fgMorphImplicitByRefArgs(tree, fgWalkPre))
+ if (comp->fgMorphImplicitByRefArgs(pTree, fgWalkPre))
{
// We can't be in an address context; the ADDR(lcl), where lcl is an implicit byref param, was
// handled earlier. (And we can't have added anything to this address, since it was implicit.)
diff --git a/src/jit/optcse.cpp b/src/jit/optcse.cpp
index fa19445aa5..c424e7e178 100644
--- a/src/jit/optcse.cpp
+++ b/src/jit/optcse.cpp
@@ -313,17 +313,24 @@ void Compiler::optCSE_GetMaskData(GenTreePtr tree, optCSE_MaskDat
}
-// Given a binary tree node return true if it is safe to swap the order of evaluation for op1 and op2
-// It only considers the locations of the CSE defs and uses for op1 and op2 to decide this
+//------------------------------------------------------------------------
+// optCSE_canSwap: Determine if the execution order of two nodes can be swapped.
//
-bool Compiler::optCSE_canSwap(GenTreePtr tree)
+// Arguments:
+// op1 - The first node
+// op2 - The second node
+//
+// Return Value:
+//    Return true iff it is safe to swap the execution order of 'op1' and 'op2',
+// considering only the locations of the CSE defs and uses.
+//
+// Assumptions:
+//    'op1' currently occurs before 'op2' in the execution order.
+//
+bool Compiler::optCSE_canSwap(GenTree* op1, GenTree* op2)
{
- assert((tree->OperKind() & GTK_SMPOP) != 0);
-
- GenTreePtr op1 = tree->gtOp.gtOp1;
- GenTreePtr op2 = tree->gtGetOp2();
-
- assert(op1 != nullptr); // We must have a binary treenode with non-null op1 and op2
+ // op1 and op2 must be non-null.
+ assert(op1 != nullptr);
assert(op2 != nullptr);
bool canSwap = true; // the default result unless proven otherwise.
@@ -341,7 +348,7 @@ bool Compiler::optCSE_canSwap(GenTreePtr tree)
}
else
{
- // We also cannot swap if op2 contains a CSE def that is used by op1
+ // We also cannot swap if op2 contains a CSE def that is used by op1.
if ((op2MaskData.CSE_defMask & op1MaskData.CSE_useMask) != 0)
{
canSwap = false;
@@ -351,6 +358,26 @@ bool Compiler::optCSE_canSwap(GenTreePtr tree)
return canSwap;
}
+//------------------------------------------------------------------------
+// optCSE_canSwap: Determine if the execution order of a node's operands can be swapped.
+//
+// Arguments:
+// tree - The node of interest
+//
+// Return Value:
+// Return true iff it is safe to swap the execution order of the operands of 'tree',
+// considering only the locations of the CSE defs and uses.
+//
+bool Compiler::optCSE_canSwap(GenTreePtr tree)
+{
+ // We must have a binary treenode with non-null op1 and op2
+ assert((tree->OperKind() & GTK_SMPOP) != 0);
+
+ GenTreePtr op1 = tree->gtOp.gtOp1;
+ GenTreePtr op2 = tree->gtGetOp2();
+
+ return optCSE_canSwap(op1, op2);
+}
/*****************************************************************************
*
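
For reference, the safety test behind optCSE_canSwap reduces to two bitmask intersections. A simplified sketch (optCSE_GetMaskData, which fills these masks from the trees, is omitted; the field names mirror the masks used above):

    #include <cstdint>

    struct MaskData
    {
        uint64_t CSE_defMask;  // CSEs defined somewhere in the subtree
        uint64_t CSE_useMask;  // CSEs used somewhere in the subtree
    };

    // 'op1' executes before 'op2'. Swapping is unsafe if a CSE defined in
    // op1 is consumed by op2, or a CSE defined in op2 is consumed by op1.
    static bool CanSwap(const MaskData& op1, const MaskData& op2)
    {
        if ((op1.CSE_defMask & op2.CSE_useMask) != 0)
            return false;
        if ((op2.CSE_defMask & op1.CSE_useMask) != 0)
            return false;
        return true;
    }
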
diff --git a/src/pal/src/exception/seh-unwind.cpp b/src/pal/src/exception/seh-unwind.cpp
index c00be51a5b..24eebbbf94 100644
--- a/src/pal/src/exception/seh-unwind.cpp
+++ b/src/pal/src/exception/seh-unwind.cpp
@@ -26,8 +26,7 @@ Abstract:
#include "pal/context.h"
#include "pal.h"
#include <dlfcn.h>
-#include <exception>
-
+
#if HAVE_LIBUNWIND_H
#ifndef __linux__
#define UNW_LOCAL_ONLY
diff --git a/src/pal/src/exception/seh.cpp b/src/pal/src/exception/seh.cpp
index 5aaa18f65a..5320ecd087 100644
--- a/src/pal/src/exception/seh.cpp
+++ b/src/pal/src/exception/seh.cpp
@@ -39,7 +39,37 @@ Abstract:
#include <unistd.h>
#include <pthread.h>
#include <stdlib.h>
-#include <utility>
+
+// Define std::move here so that we don't have to include the <utility> header,
+// which on some platforms pulls in STL machinery that collides with PAL definitions.
+// std::move is needed to enable the move constructor and move assignment operator
+// for PAL_SEHException.
+namespace std
+{
+ template<typename T>
+ struct remove_reference
+ {
+ typedef T type;
+ };
+
+ template<typename T>
+ struct remove_reference<T&>
+ {
+ typedef T type;
+ };
+
+ template<typename T>
+ struct remove_reference<T&&>
+ {
+ typedef T type;
+ };
+
+ template<class T> inline
+ typename remove_reference<T>::type&& move(T&& arg)
+ { // forward arg as movable
+ return ((typename remove_reference<T>::type&&)arg);
+ }
+}
using namespace CorUnix;
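
The four templates above are all the move machinery the PAL needs. A small illustration of what they enable (Holder is a hypothetical movable type standing in for PAL_SEHException):

    struct Holder
    {
        char* p;
        Holder(Holder&& other) : p(other.p) { other.p = nullptr; }  // move ctor
        Holder& operator=(Holder&& other)                           // move assignment
        {
            p = other.p;
            other.p = nullptr;
            return *this;
        }
    };

    void Transfer(Holder& dst, Holder& src)
    {
        // std::move casts src to Holder&&, selecting the move overloads above.
        dst = std::move(src);
    }
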
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index b24a1509e6..89a6437da1 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -151,7 +151,6 @@ set(VM_SOURCES_WKS
coverage.cpp
customattribute.cpp
custommarshalerinfo.cpp
- dbggcinfodecoder.cpp
dllimportcallback.cpp
eeconfig.cpp
eecontract.cpp
@@ -430,7 +429,6 @@ endif(CLR_CMAKE_PLATFORM_UNIX)
set(VM_SOURCES_DAC_ARCH
gcinfodecoder.cpp
- dbggcinfodecoder.cpp
exceptionhandling.cpp
)
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index 7eea254646..b66215f5f2 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -3015,7 +3015,7 @@ void * EEJitManager::allocCodeFragmentBlock(size_t blockSize, unsigned alignment
#endif // !DACCESS_COMPILE
-PTR_VOID EEJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
+GCInfoToken EEJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
{
CONTRACTL {
NOTHROW;
@@ -3024,7 +3024,8 @@ PTR_VOID EEJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
SUPPORTS_DAC;
} CONTRACTL_END;
- return GetCodeHeader(MethodToken)->GetGCInfo();
+ // The JIT-ed code always has the current version of GCInfo
+ return { GetCodeHeader(MethodToken)->GetGCInfo(), GCINFO_VERSION };
}
// creates an enumeration and returns the number of EH clauses
@@ -5035,7 +5036,7 @@ NativeImageJitManager::NativeImageJitManager()
#endif // #ifndef DACCESS_COMPILE
-PTR_VOID NativeImageJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
+GCInfoToken NativeImageJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
{
CONTRACTL {
NOTHROW;
@@ -5060,7 +5061,8 @@ PTR_VOID NativeImageJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);
// GCInfo immediately follows unwind data
- return dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
+ // GCInfo from an NGEN-ed image is always the current version
+ return { dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize, GCINFO_VERSION };
}
unsigned NativeImageJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
@@ -5681,7 +5683,7 @@ void NativeImageJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& Method
//
methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
- methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfo(MethodToken));
+ methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
methodRegionInfo->coldStartAddress = 0;
methodRegionInfo->coldSize = 0;
@@ -6274,14 +6276,17 @@ PTR_MethodDesc MethodIterator::GetMethodDesc()
return NativeUnwindInfoLookupTable::GetMethodDesc(m_pNgenLayout, GetRuntimeFunction(), m_ModuleBase);
}
-PTR_VOID MethodIterator::GetGCInfo()
+GCInfoToken MethodIterator::GetGCInfoToken()
{
LIMITED_METHOD_CONTRACT;
// get the gc info from the RT function
SIZE_T size;
PTR_VOID pUnwindData = GetUnwindDataBlob(m_ModuleBase, GetRuntimeFunction(), &size);
- return (PTR_VOID)((PTR_BYTE)pUnwindData + size);
+ PTR_VOID gcInfo = (PTR_VOID)((PTR_BYTE)pUnwindData + size);
+ // MethodIterator is used to iterate over the methods of an NGEN image,
+ // so the GCInfo version is always the current one.
+ return { gcInfo, GCINFO_VERSION };
}
TADDR MethodIterator::GetMethodStartAddress()
@@ -6359,8 +6364,8 @@ void MethodIterator::GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRe
methodRegionInfo->hotStartAddress = GetMethodStartAddress();
methodRegionInfo->coldStartAddress = GetMethodColdStartAddress();
-
- methodRegionInfo->hotSize = ExecutionManager::GetNativeImageJitManager()->GetCodeManager()->GetFunctionSize(GetGCInfo());
+ GCInfoToken gcInfoToken = GetGCInfoToken();
+ methodRegionInfo->hotSize = ExecutionManager::GetNativeImageJitManager()->GetCodeManager()->GetFunctionSize(gcInfoToken);
methodRegionInfo->coldSize = 0;
if (methodRegionInfo->coldStartAddress != NULL)
@@ -6408,6 +6413,24 @@ ReadyToRunInfo * ReadyToRunJitManager::JitTokenToReadyToRunInfo(const METHODTOKE
return dac_cast<PTR_Module>(MethodToken.m_pRangeSection->pHeapListOrZapModule)->GetReadyToRunInfo();
}
+UINT32 ReadyToRunJitManager::JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ HOST_NOCALLS;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ READYTORUN_HEADER * header = JitTokenToReadyToRunInfo(MethodToken)->GetImage()->GetReadyToRunHeader();
+ UINT32 gcInfoVersion = header->MajorVersion;
+
+ // Currently there's only one version of GCInfo.
+ _ASSERTE(gcInfoVersion == GCINFO_VERSION);
+
+ return gcInfoVersion;
+}
+
PTR_RUNTIME_FUNCTION ReadyToRunJitManager::JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken)
{
CONTRACTL {
@@ -6433,7 +6456,7 @@ TADDR ReadyToRunJitManager::JitTokenToStartAddress(const METHODTOKEN& MethodToke
RUNTIME_FUNCTION__BeginAddress(dac_cast<PTR_RUNTIME_FUNCTION>(MethodToken.m_pCodeHeader));
}
-PTR_VOID ReadyToRunJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
+GCInfoToken ReadyToRunJitManager::GetGCInfoToken(const METHODTOKEN& MethodToken)
{
CONTRACTL {
NOTHROW;
@@ -6458,7 +6481,10 @@ PTR_VOID ReadyToRunJitManager::GetGCInfo(const METHODTOKEN& MethodToken)
PTR_VOID pUnwindData = GetUnwindDataBlob(baseAddress, pRuntimeFunction, &nUnwindDataSize);
// GCInfo immediately follows unwind data
- return dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
+ PTR_BYTE gcInfo = dac_cast<PTR_BYTE>(pUnwindData) + nUnwindDataSize;
+ UINT32 gcInfoVersion = JitTokenToGCInfoVersion(MethodToken);
+
+ return { gcInfo, gcInfoVersion };
}
unsigned ReadyToRunJitManager::InitializeEHEnumeration(const METHODTOKEN& MethodToken, EH_CLAUSE_ENUMERATOR* pEnumState)
@@ -6863,7 +6889,7 @@ void ReadyToRunJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodT
// READYTORUN: FUTURE: Hot-cold splitting
methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
- methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfo(MethodToken));
+ methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
methodRegionInfo->coldStartAddress = 0;
methodRegionInfo->coldSize = 0;
}
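
The aggregate returns above (return { info, version }) imply that GCInfoToken is a plain pointer/version pair. A sketch consistent with its use throughout this diff (the authoritative definition lives in src/inc/gcinfo.h, which codeman.h now includes):

    struct GCInfoToken
    {
        void*    Info;     // PTR_VOID in the runtime: start of the GCInfo blob
        uint32_t Version;  // GCINFO_VERSION for JIT-ed and NGEN-ed code;
                           // the ReadyToRun header's MajorVersion for R2R code
    };

Carrying the version next to the blob is what lets a single decoder serve version-resilient ReadyToRun images alongside always-current JIT and NGEN output.
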
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
index 855c15125a..ae86a25e93 100644
--- a/src/vm/codeman.h
+++ b/src/vm/codeman.h
@@ -24,9 +24,10 @@ Abstract:
An IJitManager knows about which method bodies live in each RangeSection.
It can handle methods of one given CodeType. It can map a method body to
a MethodDesc. It knows where the GCInfo about the method lives.
- Today, we have 2 IJitManagers viz.
+ Today, we have three IJitManagers viz.
1. EEJitManager for JITcompiled code generated by clrjit.dll
2. NativeImageJitManager for ngenned code.
+ 3. ReadyToRunJitManager for version resilient ReadyToRun code
An ICodeManager knows how to crack a specific format of GCInfo. There is
a default format (handled by ExecutionManager::GetDefaultCodeManager())
@@ -66,6 +67,7 @@ Abstract:
#include "debuginfostore.h"
#include "shash.h"
#include "pedecoder.h"
+#include "gcinfo.h"
class MethodDesc;
class ICorJitCompiler;
@@ -113,6 +115,7 @@ enum StubCodeBlockKind : int
// Method header which exists just before the code.
// Every IJitManager could have its own format for the header.
// Today CodeHeader is used by the EEJitManager.
+// The GCInfo version is always the current GCINFO_VERSION in this header.
#ifdef USE_INDIRECT_CODEHEADER
typedef DPTR(struct _hpRealCodeHdr) PTR_RealCodeHeader;
@@ -735,7 +738,11 @@ public:
CrawlFrame *pCf)=0;
#endif // #ifndef DACCESS_COMPILE
- virtual PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken)=0;
+ virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken)=0;
+ PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken)
+ {
+ return GetGCInfoToken(MethodToken).Info;
+ }
TADDR JitTokenToModuleBase(const METHODTOKEN& MethodToken);
@@ -965,7 +972,7 @@ public:
virtual TypeHandle ResolveEHClause(EE_ILEXCEPTION_CLAUSE* pEHClause,
CrawlFrame *pCf);
#endif // !DACCESS_COMPILE
- PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken);
+ GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken);
#endif // !CROSSGEN_COMPILE
#if !defined DACCESS_COMPILE && !defined CROSSGEN_COMPILE
void RemoveJitData(CodeHeader * pCHdr, size_t GCinfo_len, size_t EHinfo_len);
@@ -1486,7 +1493,7 @@ inline void EEJitManager::JitTokenToMethodRegionInfo(const METHODTOKEN& MethodTo
} CONTRACTL_END;
methodRegionInfo->hotStartAddress = JitTokenToStartAddress(MethodToken);
- methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfo(MethodToken));
+ methodRegionInfo->hotSize = GetCodeManager()->GetFunctionSize(GetGCInfoToken(MethodToken));
methodRegionInfo->coldStartAddress = 0;
methodRegionInfo->coldSize = 0;
}
@@ -1543,7 +1550,7 @@ public:
CrawlFrame *pCf);
#endif // #ifndef DACCESS_COMPILE
- virtual PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken);
+ virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken);
#if defined(WIN64EXCEPTIONS)
virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo);
@@ -1638,6 +1645,8 @@ public:
virtual PCODE GetCodeAddressForRelOffset(const METHODTOKEN& MethodToken, DWORD relOffset);
static ReadyToRunInfo * JitTokenToReadyToRunInfo(const METHODTOKEN& MethodToken);
+ static UINT32 JitTokenToGCInfoVersion(const METHODTOKEN& MethodToken);
+
static PTR_RUNTIME_FUNCTION JitTokenToRuntimeFunction(const METHODTOKEN& MethodToken);
virtual TADDR JitTokenToStartAddress(const METHODTOKEN& MethodToken);
@@ -1653,7 +1662,7 @@ public:
CrawlFrame *pCf);
#endif // #ifndef DACCESS_COMPILE
- virtual PTR_VOID GetGCInfo(const METHODTOKEN& MethodToken);
+ virtual GCInfoToken GetGCInfoToken(const METHODTOKEN& MethodToken);
#if defined(WIN64EXCEPTIONS)
virtual PTR_RUNTIME_FUNCTION LazyGetFunctionEntry(EECodeInfo * pCodeInfo);
@@ -1754,10 +1763,15 @@ public:
return m_relOffset;
}
- PTR_VOID GetGCInfo()
+ GCInfoToken GetGCInfoToken()
{
WRAPPER_NO_CONTRACT;
- return GetJitManager()->GetGCInfo(GetMethodToken());
+ return GetJitManager()->GetGCInfoToken(GetMethodToken());
+ }
+
+ PTR_VOID GetGCInfo()
+ {
+ return GetGCInfoToken().Info;
}
void GetMethodRegionInfo(IJitManager::MethodRegionInfo *methodRegionInfo)
@@ -1824,7 +1838,7 @@ class MethodSectionIterator;
//
// MethodIterator class is used to iterate all the methods in an ngen image.
// It will match and report hot (and cold, if any) sections of a method at the same time.
-//
+// The GCInfo version is always the current one.
class MethodIterator
{
public:
@@ -1852,7 +1866,7 @@ private:
BOOL Next();
PTR_MethodDesc GetMethodDesc();
- PTR_VOID GetGCInfo();
+ GCInfoToken GetGCInfoToken();
TADDR GetMethodStartAddress();
TADDR GetMethodColdStartAddress();
ULONG GetHotCodeSize();
diff --git a/src/vm/crossgen/CMakeLists.txt b/src/vm/crossgen/CMakeLists.txt
index 3a20675ef1..c2392a2d9a 100644
--- a/src/vm/crossgen/CMakeLists.txt
+++ b/src/vm/crossgen/CMakeLists.txt
@@ -92,7 +92,6 @@ set(VM_CROSSGEN_SOURCES
../vars.cpp
../versionresilienthashcode.cpp
../zapsig.cpp
- ../dbggcinfodecoder.cpp
../gcinfodecoder.cpp
../sha1.cpp
../crossgencompile.cpp
diff --git a/src/vm/crossgen/wks_crossgen.nativeproj b/src/vm/crossgen/wks_crossgen.nativeproj
index 31404a3d66..803a3cb705 100644
--- a/src/vm/crossgen/wks_crossgen.nativeproj
+++ b/src/vm/crossgen/wks_crossgen.nativeproj
@@ -130,7 +130,6 @@
<!-- SOURCES_NONPAL -->
<ItemGroup>
- <CppCompile Include="$(VmSourcesDir)\DbgGcInfoDecoder.cpp" />
<CppCompile Include="$(VmSourcesDir)\GcInfoDecoder.cpp" />
<CppCompile Include="$(VmSourcesDir)\Crypto\SHA1.cpp" Condition="'$(FeatureCoreclr)' != 'true'"/>
<CppCompile Include="$(VmSourcesDir)\SHA1.cpp" Condition="'$(FeatureCoreclr)' == 'true'"/>
diff --git a/src/vm/dac/dacwks.targets b/src/vm/dac/dacwks.targets
index 121b14ec90..82ab5439d5 100644
--- a/src/vm/dac/dacwks.targets
+++ b/src/vm/dac/dacwks.targets
@@ -127,7 +127,6 @@
<ItemGroup Condition="'$(TargetArch)' == 'amd64'" >
<CppCompile Include="$(ClrSrcDirectory)\vm\GcInfoDecoder.cpp" />
- <CppCompile Include="$(ClrSrcDirectory)\vm\DbgGcInfoDecoder.cpp" />
<CppCompile Include="$(ClrSrcDirectory)\vm\amd64\cGenAMD64.cpp" />
<CppCompile Include="$(ClrSrcDirectory)\vm\amd64\ExcepAMD64.cpp" />
<CppCompile Include="$(ClrSrcDirectory)\vm\amd64\gmsAMD64.cpp" />
diff --git a/src/vm/dbggcinfodecoder.cpp b/src/vm/dbggcinfodecoder.cpp
deleted file mode 100644
index c921256ef9..0000000000
--- a/src/vm/dbggcinfodecoder.cpp
+++ /dev/null
@@ -1,932 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-
-
-
-#include "common.h"
-#include "gcinfodecoder.h"
-
-#ifdef VERIFY_GCINFO
-#ifdef USE_GC_INFO_DECODER
-
-#include "dbggcinfodecoder.h"
-
-#ifndef GCINFODECODER_CONTRACT
-#define GCINFODECODER_CONTRACT(contract) contract
-#endif // !GCINFODECODER_CONTRACT
-
-#ifndef GET_CALLER_SP
-#define GET_CALLER_SP(pREGDISPLAY) EECodeManager::GetCallerSp(pREGDISPLAY)
-#endif // !GET_CALLER_SP
-
-#ifndef VALIDATE_OBJECTREF
-#ifdef DACCESS_COMPILE
-#define VALIDATE_OBJECTREF(objref, fDeep)
-#else // DACCESS_COMPILE
-#define VALIDATE_OBJECTREF(objref, fDeep) OBJECTREFToObject(objref)->Validate(fDeep)
-#endif // DACCESS_COMPILE
-#endif // !VALIDATE_OBJECTREF
-
-#ifndef VALIDATE_ROOT
-#define VALIDATE_ROOT(isInterior, hCallBack, pObjRef) \
- do { \
- /* Only call Object::Validate() with bDeep == TRUE if we are in the promote phase. */ \
- /* We should call Validate() with bDeep == FALSE if we are in the relocation phase. */ \
- \
- GCCONTEXT* pGCCtx = (GCCONTEXT*)(hCallBack); \
- \
- if (!(isInterior) && !(m_Flags & DECODE_NO_VALIDATION)) \
- VALIDATE_OBJECTREF(*(pObjRef), pGCCtx->sc->promotion == TRUE); \
- } while (0)
-#endif // !VALIDATE_ROOT
-
-
-
-namespace DbgGcInfo {
-
-
-//static
-bool GcInfoDecoder::SetIsInterruptibleCB (UINT32 startOffset, UINT32 stopOffset, LPVOID hCallback)
-{
- GcInfoDecoder *pThis = (GcInfoDecoder*)hCallback;
-
- bool fStop = pThis->m_InstructionOffset >= startOffset && pThis->m_InstructionOffset < stopOffset;
-
- if (fStop)
- pThis->m_IsInterruptible = true;
-
- return fStop;
-}
-
-
-GcInfoDecoder::GcInfoDecoder(
- const BYTE* gcInfoAddr,
- GcInfoDecoderFlags flags,
- UINT32 breakOffset
- )
- : m_Reader( gcInfoAddr )
- , m_InstructionOffset( breakOffset )
- , m_IsInterruptible( false )
- , m_pLiveRegisters( NULL )
- , m_pLiveStackSlots( NULL )
- , m_NumLiveRegisters(0)
- , m_NumLiveStackSlots(0)
-#ifdef _DEBUG
- , m_Flags( flags )
-#endif
-{
-#ifdef _TARGET_ARM_
- _ASSERTE(!"JIT32 is not generating GCInfo in the correct format yet!");
-#endif
-
- _ASSERTE( (flags & (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES)) || (0 == breakOffset) );
-
- // The current implementation doesn't support the two flags together
- _ASSERTE(
- ((flags & (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES)) != (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES))
- );
-
-
- //--------------------------------------------
- // Pre-decode information
- //--------------------------------------------
-
- m_IsVarArg = (m_Reader.Read(1)) ? true : false;
-
- size_t hasSecurityObject = m_Reader.Read(1);
- if(hasSecurityObject)
- m_SecurityObjectStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(SECURITY_OBJECT_STACK_SLOT_ENCBASE));
- else
- m_SecurityObjectStackSlot = NO_SECURITY_OBJECT;
-
- size_t hasPSPSym = m_Reader.Read(1);
- if(hasPSPSym)
- {
- m_PSPSymStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(PSP_SYM_STACK_SLOT_ENCBASE));
- }
- else
- {
- m_PSPSymStackSlot = NO_PSP_SYM;
- }
-
- size_t hasGenericsInstContext = m_Reader.Read(1);
- if(hasGenericsInstContext)
- {
- m_GenericsInstContextStackSlot = (INT32) DENORMALIZE_STACK_SLOT(m_Reader.DecodeVarLengthSigned(GENERICS_INST_CONTEXT_STACK_SLOT_ENCBASE));
- }
- else
- {
- m_GenericsInstContextStackSlot = NO_GENERICS_INST_CONTEXT;
- }
-
- m_CodeLength = (UINT32) DENORMALIZE_CODE_LENGTH(m_Reader.DecodeVarLengthUnsigned(CODE_LENGTH_ENCBASE));
-
- size_t hasStackBaseRegister = m_Reader.Read(1);
- if(hasStackBaseRegister)
- m_StackBaseRegister = (UINT32) DENORMALIZE_STACK_BASE_REGISTER(m_Reader.DecodeVarLengthUnsigned(STACK_BASE_REGISTER_ENCBASE));
- else
- m_StackBaseRegister = NO_STACK_BASE_REGISTER;
-
- size_t hasSizeOfEditAndContinuePreservedArea = m_Reader.Read(1);
- if(hasSizeOfEditAndContinuePreservedArea)
- m_SizeOfEditAndContinuePreservedArea = (UINT32) m_Reader.DecodeVarLengthUnsigned(SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA_ENCBASE);
- else
- m_SizeOfEditAndContinuePreservedArea = NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA;
-
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- m_SizeOfStackOutgoingAndScratchArea = (UINT32)DENORMALIZE_SIZE_OF_STACK_AREA(m_Reader.DecodeVarLengthUnsigned(SIZE_OF_STACK_AREA_ENCBASE));
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-
- m_NumInterruptibleRanges = (UINT32) DENORMALIZE_NUM_INTERRUPTIBLE_RANGES(m_Reader.DecodeVarLengthUnsigned(NUM_INTERRUPTIBLE_RANGES_ENCBASE));
-
- if( flags & DECODE_INTERRUPTIBILITY )
- {
- EnumerateInterruptibleRanges(&SetIsInterruptibleCB, this);
- }
-}
-
-
-bool GcInfoDecoder::IsInterruptible()
-{
- _ASSERTE( m_Flags & DECODE_INTERRUPTIBILITY );
- return m_IsInterruptible;
-}
-
-
-void GcInfoDecoder::EnumerateInterruptibleRanges (
- EnumerateInterruptibleRangesCallback *pCallback,
- LPVOID hCallback)
-{
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-
- //------------------------------------------------------------------------------
- // Try partially interruptible first
- //------------------------------------------------------------------------------
-
- UINT32 numCallSites = (UINT32)m_Reader.Read( sizeof( numCallSites ) * 8 );
- UINT32 callSiteIdx = 0;
-
- if( numCallSites > 0 )
- {
- UINT32 numSlotMappings = (UINT32)m_Reader.Read( sizeof( numSlotMappings ) * 8 );
-
- // Align the reader to the next byte to continue decoding
- m_Reader.Skip( ( 8 - ( m_Reader.GetCurrentPos() % 8 ) ) % 8 );
-
- for( callSiteIdx=0; callSiteIdx<numCallSites; callSiteIdx++ )
- {
- UINT32 instructionOffset = (UINT32)m_Reader.Read( 32 );
-
- bool fStop = pCallback(instructionOffset, instructionOffset+1, hCallback);
- if (fStop)
- return;
-
- m_Reader.Skip( numSlotMappings );
- }
-
- // Call site not found. Skip the slot mapping table in preparation for reading the fully-interruptible information
- m_Reader.Skip( numSlotMappings * sizeof( GcSlotDesc ) * 8 );
- }
-
-#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-#endif
-
-
- // If no info is found for the call site, we default to fully-interruptbile
- LOG((LF_GCROOTS, LL_INFO1000000, "No GC info found for call site at offset %x. Defaulting to fully-interruptible information.\n", (int) m_InstructionOffset));
-
- // Align the reader to the next byte to continue decoding
- m_Reader.Skip( ( 8 - ( m_Reader.GetCurrentPos() % 8 ) ) % 8 );
-
- UINT32 lastInterruptibleRangeStopOffsetNormalized = 0;
-
- for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
- {
- UINT32 normStartDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA_ENCBASE );
- UINT32 normStopDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA_ENCBASE ) + 1;
-
- UINT32 rangeStartOffsetNormalized = lastInterruptibleRangeStopOffsetNormalized + normStartDelta;
- UINT32 rangeStopOffsetNormalized = rangeStartOffsetNormalized + normStopDelta;
-
- UINT32 rangeStartOffset = DENORMALIZE_CODE_OFFSET(rangeStartOffsetNormalized);
- UINT32 rangeStopOffset = DENORMALIZE_CODE_OFFSET(rangeStopOffsetNormalized);
-
- bool fStop = pCallback(rangeStartOffset, rangeStopOffset, hCallback);
- if (fStop)
- return;
-
- lastInterruptibleRangeStopOffsetNormalized = rangeStopOffsetNormalized;
- }
-}
-
-
-INT32 GcInfoDecoder::GetSecurityObjectStackSlot()
-{
- _ASSERTE( m_Flags & DECODE_SECURITY_OBJECT );
- return m_SecurityObjectStackSlot;
-}
-
-INT32 GcInfoDecoder::GetGenericsInstContextStackSlot()
-{
- _ASSERTE( m_Flags & DECODE_GENERICS_INST_CONTEXT );
- return m_GenericsInstContextStackSlot;
-}
-
-INT32 GcInfoDecoder::GetPSPSymStackSlot()
-{
- _ASSERTE( m_Flags & DECODE_PSP_SYM);
- return m_PSPSymStackSlot;
-}
-
-bool GcInfoDecoder::GetIsVarArg()
-{
- _ASSERTE( m_Flags & DECODE_VARARG );
- return m_IsVarArg;
-}
-
-UINT32 GcInfoDecoder::GetCodeLength()
-{
- _ASSERTE( m_Flags & DECODE_CODE_LENGTH );
- return m_CodeLength;
-}
-
-UINT32 GcInfoDecoder::GetStackBaseRegister()
-{
- return m_StackBaseRegister;
-}
-
-UINT32 GcInfoDecoder::GetSizeOfEditAndContinuePreservedArea()
-{
- _ASSERTE( m_Flags & DECODE_EDIT_AND_CONTINUE );
- return m_SizeOfEditAndContinuePreservedArea;
-}
-
-
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
-
-UINT32 GcInfoDecoder::GetSizeOfStackParameterArea()
-{
- return m_SizeOfStackOutgoingAndScratchArea;
-}
-
-#endif // FIXED_STACK_PARAMETER_SCRATCH_AREA
-
-
-bool GcInfoDecoder::EnumerateLiveSlots(
- PREGDISPLAY pRD,
- bool reportScratchSlots,
- unsigned flags,
- GCEnumCallback pCallBack,
- LPVOID hCallBack
- )
-{
- _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
-
-#if 0
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-
- //------------------------------------------------------------------------------
- // Try partially interruptible first
- //------------------------------------------------------------------------------
-
- UINT32 numCallSites = (UINT32)m_Reader.Read( sizeof( numCallSites ) * 8 );
- UINT32 callSiteIdx = 0;
-
- if( numCallSites > 0 )
- {
- UINT32 numSlotMappings = (UINT32)m_Reader.Read( sizeof( numSlotMappings ) * 8 );
-
- // Align the reader to the next byte to continue decoding
- m_Reader.Skip( ( 8 - ( m_Reader.GetCurrentPos() % 8 ) ) % 8 );
-
- for( callSiteIdx=0; callSiteIdx<numCallSites; callSiteIdx++ )
- {
- UINT32 instructionOffset = (UINT32)m_Reader.Read( 32 );
- if( instructionOffset == m_InstructionOffset )
- {
- m_IsInterruptible = true;
-
- BYTE* callSiteLiveSet = (BYTE*) _alloca( ( numSlotMappings + 7 ) / 8 );
-
- UINT32 i;
- for( i=0; i<numSlotMappings/8; i++ )
- callSiteLiveSet[ i ] = (BYTE)m_Reader.Read( 8 );
-
- callSiteLiveSet[ i ] = (BYTE)m_Reader.Read( numSlotMappings % 8 );
-
- m_Reader.Skip( ( numCallSites - callSiteIdx - 1 ) * ( 32 + numSlotMappings ) );
-
- //---------------------------------------------------------------------------
- // Read slot mappings
- //---------------------------------------------------------------------------
-
- GcSlotDesc* slotMappings = (GcSlotDesc*) _alloca( numSlotMappings * sizeof( GcSlotDesc ) );
- // Assert that we can read a GcSlotDesc with a single call to m_Reader.Read()
- _ASSERTE( sizeof( GcSlotDesc ) <= sizeof ( size_t ) );
- for( UINT32 i=0; i<numSlotMappings; i++ )
- {
- size_t data = m_Reader.Read( sizeof( GcSlotDesc ) * 8 );
- slotMappings[ i ] = *( (GcSlotDesc*) &data );
- }
-
- //---------------------------------------------------------------------------
- // Report live slots
- //---------------------------------------------------------------------------
-
- for( UINT32 i=0; i<numSlotMappings; i++ )
- {
- BYTE isLive = callSiteLiveSet[ i / 8 ] & ( 1 << ( i % 8 ) );
- if( isLive )
- {
- GcSlotDesc slotDesc = slotMappings[ i ];
- if( slotDesc.IsRegister )
- {
- if( reportScratchSlots || !IsScratchRegister( slotDesc.Slot.RegisterNumber, pRD ) )
- {
- ReportRegisterToGC(
- slotDesc.Slot.RegisterNumber,
- slotDesc.IsInterior,
- slotDesc.IsPinned,
- pRD,
- flags,
- pCallBack,
- hCallBack
- );
- }
- else
- {
- LOG((LF_GCROOTS, LL_INFO1000, "\"Live\" scratch register " FMT_REG " not reported\n", slotDesc.Slot.RegisterNumber));
- }
- }
- else
- {
- GcStackSlotBase spBase = (GcStackSlotBase) (slotDesc.Slot.SpOffset & 0x3);
- INT32 realSpOffset = slotDesc.Slot.SpOffset ^ (int) spBase;
-
- if( reportScratchSlots || !IsScratchStackSlot(realSpOffset, spBase, pRD) )
- {
- ReportStackSlotToGC(
- realSpOffset,
- spBase,
- slotDesc.IsInterior,
- slotDesc.IsPinned,
- pRD,
- flags,
- pCallBack,
- hCallBack
- );
- }
- else
- {
- LOG((LF_GCROOTS, LL_INFO1000, "\"Live\" scratch stack slot " FMT_STK " not reported\n", DBG_STK(realSpOffset)));
- }
- }
- }
- }
-
- return true;
- }
-
- m_Reader.Skip( numSlotMappings );
- }
-
- // Call site not found. Skip the slot mapping table in preparation for reading the fully-interruptible information
- m_Reader.Skip( numSlotMappings * sizeof( GcSlotDesc ) * 8 );
- }
-
-#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-#endif
-
-
- // If no info is found for the call site, we default to fully-interruptbile
- LOG((LF_GCROOTS, LL_INFO1000000, "No GC info found for call site at offset %x. Defaulting to fully-interruptible information.\n", (int) m_InstructionOffset));
-
- // Align the reader to the next byte to continue decoding
- m_Reader.Skip( ( 8 - ( m_Reader.GetCurrentPos() % 8 ) ) % 8 );
-
- // Skip interruptibility information
- for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
- {
- m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA_ENCBASE );
- m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA_ENCBASE );
- }
-
- //
- // If this is a non-leaf frame and we are executing a call, the unwinder has given us the PC
- // of the call instruction. We should adjust it to the PC of the instruction after the call in order to
- // obtain transition information for scratch slots. However, we always assume scratch slots to be
- // dead for non-leaf frames (except for ResumableFrames), so we don't need to adjust the PC.
- // If this is a non-leaf frame and we are not executing a call (i.e.: a fault occurred in the function),
- // then it would be incorrect to ajust the PC
- //
-
- int lifetimeTransitionsCount = 0;
-
- //--------------------------------------------------------------------
- // Decode registers
- //--------------------------------------------------------------------
-
- size_t numRegisters = m_Reader.DecodeVarLengthUnsigned(NUM_REGISTERS_ENCBASE);
-
- {
-#ifdef ENABLE_CONTRACTS_IMPL
- CONTRACT_VIOLATION(FaultViolation | FaultNotFatal);
-#endif
- m_pLiveRegisters = (GcSlotDesc*) qbSlots1.AllocNoThrow(sizeof(GcSlotDesc)*numRegisters);
- }
- if (m_pLiveRegisters == NULL)
- {
- return false;
- }
-
-
- _ASSERTE(m_pLiveRegisters);
-
- int lastNormRegNum = 0;
-
- for(int i=0; i<numRegisters; i++)
- {
- if( i==0 )
- {
- lastNormRegNum = (int) m_Reader.DecodeVarLengthUnsigned(REGISTER_ENCBASE);
- }
- else
- {
- int normRegDelta = (int) m_Reader.DecodeVarLengthUnsigned(REGISTER_DELTA_ENCBASE) + 1;
- lastNormRegNum += normRegDelta;
- }
- int regNum = DENORMALIZE_REGISTER(lastNormRegNum);
-
- BOOL isInterior = FALSE;
- BOOL isPinned = FALSE;
- BOOL isLive = FALSE;
-
- size_t normCodeOffset = (size_t)(SSIZE_T)(-1);
- BOOL becomesLive = TRUE;
- for(;;)
- {
- size_t normCodeOffsetDelta = m_Reader.DecodeVarLengthUnsigned(NORM_CODE_OFFSET_DELTA_ENCBASE);
- if(normCodeOffsetDelta == 0) // terminator
- break;
-
- if(normCodeOffset != (size_t)(SSIZE_T)(-1))
- becomesLive = (BOOL) m_Reader.Read(1);
-
- normCodeOffset += normCodeOffsetDelta;
-
- UINT32 instructionOffset = DENORMALIZE_CODE_OFFSET((UINT32)normCodeOffset);
-
- BOOL becomesInterior = FALSE;
- BOOL becomesPinned = FALSE;
-
- if(becomesLive)
- {
- if(m_Reader.Read(1))
- {
- size_t flagEnc = m_Reader.Read( 2 );
- becomesInterior = (BOOL)(flagEnc & 0x1);
- becomesPinned = (BOOL)(flagEnc & 0x2);
- }
- }
-
- lifetimeTransitionsCount++;
-
- LOG((LF_GCROOTS, LL_INFO1000000,
- "Transition " FMT_PIPTR "in " FMT_REG "going %s at offset %04x.\n",
- DBG_PIN_NAME(becomesPinned), DBG_IPTR_NAME(becomesInterior), regNum,
- becomesLive ? "live" : "dead",
- (int) instructionOffset ));
-
- if( instructionOffset > m_InstructionOffset )
- continue;
-
- isLive = becomesLive;
- isInterior = becomesInterior;
- isPinned = becomesPinned;
- }
-
- if( isLive )
- {
- if( reportScratchSlots || !IsScratchRegister( regNum, pRD ) )
- {
- m_pLiveRegisters[m_NumLiveRegisters].Slot.RegisterNumber = regNum;
- GcSlotFlags flags = GC_SLOT_BASE;
- if(isInterior)
- flags = (GcSlotFlags) (flags | GC_SLOT_INTERIOR);
- if(isPinned)
- flags = (GcSlotFlags) (flags | GC_SLOT_PINNED);
-
- m_pLiveRegisters[m_NumLiveRegisters].Flags = flags;
- m_NumLiveRegisters++;
- }
- else
- {
- LOG((LF_GCROOTS, LL_INFO1000, "\"Live\" scratch register " FMT_REG " not reported\n", regNum));
- }
- }
- }
-
- //--------------------------------------------------------------------
- // Decode stack slots
- //--------------------------------------------------------------------
-
- size_t numStackSlots = m_Reader.DecodeVarLengthUnsigned(NUM_STACK_SLOTS_ENCBASE);
- {
-#ifdef ENABLE_CONTRACTS_IMPL
- CONTRACT_VIOLATION(FaultViolation | FaultNotFatal);
-#endif
- m_pLiveStackSlots = (GcSlotDesc*) qbSlots2.AllocNoThrow(sizeof(GcSlotDesc)*numStackSlots);
- }
- if (m_pLiveStackSlots == NULL)
- {
- return false;
- }
- _ASSERTE(m_pLiveStackSlots);
-
- INT32 lastNormStackSlot = 0;
-
- for(int i=0; i<numStackSlots; i++)
- {
- if( i==0 )
- {
- lastNormStackSlot = (INT32) m_Reader.DecodeVarLengthSigned(STACK_SLOT_ENCBASE);
- }
- else
- {
- INT32 normStackSlotDelta = (INT32) m_Reader.DecodeVarLengthUnsigned(STACK_SLOT_DELTA_ENCBASE);
- lastNormStackSlot += normStackSlotDelta;
- }
- INT32 spOffset = DENORMALIZE_STACK_SLOT(lastNormStackSlot);
- GcStackSlotBase spBase = (GcStackSlotBase) m_Reader.Read(2);
-
- BOOL isInterior = FALSE;
- BOOL isPinned = FALSE;
- BOOL isLive = FALSE;
-
- size_t normCodeOffset = (size_t)(SSIZE_T)(-1);
- BOOL becomesLive = TRUE;
- for(;;)
- {
- size_t normCodeOffsetDelta = m_Reader.DecodeVarLengthUnsigned(NORM_CODE_OFFSET_DELTA_ENCBASE);
- if(normCodeOffsetDelta == 0) // terminator
- break;
-
- if(normCodeOffset != (size_t)(SSIZE_T)(-1))
- becomesLive = (BOOL) m_Reader.Read(1);
-
- normCodeOffset += normCodeOffsetDelta;
-
- UINT32 instructionOffset = DENORMALIZE_CODE_OFFSET((UINT32)normCodeOffset);
-
- BOOL becomesInterior = FALSE;
- BOOL becomesPinned = FALSE;
-
- if(becomesLive)
- {
- if(m_Reader.Read(1))
- {
- size_t flagEnc = m_Reader.Read( 2 );
- becomesInterior = (BOOL)(flagEnc & 0x1);
- becomesPinned = (BOOL)(flagEnc & 0x2);
- }
- }
-
- lifetimeTransitionsCount++;
-
- LOG((LF_GCROOTS, LL_INFO1000000,
- "Transition " FMT_PIPTR "in " FMT_STK "going %s at offset %04x.\n",
- DBG_PIN_NAME(becomesPinned), DBG_IPTR_NAME(becomesInterior), DBG_STK(spOffset),
- becomesLive ? "live" : "dead",
- (int) instructionOffset ));
-
- if( instructionOffset > m_InstructionOffset )
- continue;
-
- isLive = becomesLive;
- isInterior = becomesInterior;
- isPinned = becomesPinned;
- }
-
- if( isLive )
- {
- if( reportScratchSlots || !IsScratchStackSlot(spOffset, spBase, pRD) )
- {
- m_pLiveStackSlots[m_NumLiveStackSlots].Slot.Stack.SpOffset = spOffset;
- m_pLiveStackSlots[m_NumLiveStackSlots].Slot.Stack.Base = spBase;
- GcSlotFlags flags = GC_SLOT_BASE;
- if(isInterior)
- flags = (GcSlotFlags) (flags | GC_SLOT_INTERIOR);
- if(isPinned)
- flags = (GcSlotFlags) (flags | GC_SLOT_PINNED);
-
- m_pLiveStackSlots[m_NumLiveStackSlots].Flags = flags;
- m_NumLiveStackSlots++;
- }
- else
- {
- LOG((LF_GCROOTS, LL_INFO1000, "\"Live\" scratch stack slot " FMT_STK " not reported\n", DBG_STK(spOffset)));
- }
- }
- }
-
-
- LOG((LF_GCROOTS, LL_INFO1000000, "Decoded %d lifetime transitions.\n", (int) lifetimeTransitionsCount ));
-
- return true;
-}
-
-void GcInfoDecoder::VerifyLiveRegister(
- UINT32 regNum,
- GcSlotFlags flags
- )
-{
- _ASSERTE(m_pLiveRegisters);
-
- // If this assert fails, the slot being passed was not found to be live in this decoder
- _ASSERTE(m_NumLiveRegisters > 0);
-
- int pos;
- for(pos = 0; pos < m_NumLiveRegisters; pos++)
- {
- if(regNum == m_pLiveRegisters[pos].Slot.RegisterNumber &&
- flags == m_pLiveRegisters[pos].Flags)
- {
- break;
- }
- }
-
- // If this assert fails, the slot being passed was not found to be live in this decoder
- _ASSERTE(pos < m_NumLiveRegisters);
-
- m_pLiveRegisters[pos] = m_pLiveRegisters[--m_NumLiveRegisters];
-}
-
-void GcInfoDecoder::VerifyLiveStackSlot(
- INT32 spOffset,
- GcStackSlotBase spBase,
- GcSlotFlags flags
- )
-{
- _ASSERTE(m_pLiveStackSlots);
-
- // If this assert fails, the slot being passed was not found to be live in this decoder
- _ASSERTE(m_NumLiveStackSlots > 0);
-
- int pos;
- for(pos = 0; pos < m_NumLiveStackSlots; pos++)
- {
- if(spOffset == m_pLiveStackSlots[pos].Slot.Stack.SpOffset &&
- spBase == m_pLiveStackSlots[pos].Slot.Stack.Base &&
- flags == m_pLiveStackSlots[pos].Flags)
- {
- break;
- }
- }
-
- // If this assert fails, the slot being passed was not found to be live in this decoder
- _ASSERTE(pos < m_NumLiveStackSlots);
-
- m_pLiveStackSlots[pos] = m_pLiveStackSlots[--m_NumLiveStackSlots];
-}
-
-void GcInfoDecoder::DoFinalVerification()
-{
- // If this assert fails, the m_NumLiveRegisters slots remaining in m_pLiveRegisters
- // were not reported by the calling decoder
- _ASSERTE(m_NumLiveRegisters == 0);
-
- // If this assert fails, the m_NumLiveStackSlots slots remaining in m_pLiveStackSlots
- // were not reported by the calling decoder
- _ASSERTE(m_NumLiveStackSlots == 0);
-
-}
-
-//-----------------------------------------------------------------------------
-// Platform-specific methods
-//-----------------------------------------------------------------------------
-
-#if defined(_TARGET_AMD64_)
-
-
-OBJECTREF* GcInfoDecoder::GetRegisterSlot(
- int regNum,
- PREGDISPLAY pRD
- )
-{
- _ASSERTE(regNum >= 0 && regNum <= 16);
- _ASSERTE(regNum != 4); // rsp
-
- // The fields of KNONVOLATILE_CONTEXT_POINTERS are in the same order as
- // the processor encoding numbers.
-
- ULONGLONG **ppRax;
-#ifdef _NTAMD64_
- ppRax = &pRD->pCurrentContextPointers->Rax;
-#else
- ppRax = &pRD->pCurrentContextPointers->Integer.Register.Rax;
-#endif
-
- return (OBJECTREF*)*(ppRax + regNum);
-}
-
-
-bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
-{
- _ASSERTE(regNum >= 0 && regNum <= 16);
- _ASSERTE(regNum != 4); // rsp
-
- UINT16 PreservedRegMask =
- (1 << 3) // rbx
- | (1 << 5) // rbp
- | (1 << 6) // rsi
- | (1 << 7) // rdi
- | (1 << 12) // r12
- | (1 << 13) // r13
- | (1 << 14) // r14
- | (1 << 15); // r15
-
- return !(PreservedRegMask & (1 << regNum));
-}
-
-
-bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
-{
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- _ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
-
- ULONGLONG pSlot = (ULONGLONG) GetStackSlot(spOffset, spBase, pRD);
- _ASSERTE(pSlot >= pRD->SP);
-
- return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
-#else
- return FALSE;
-#endif
-}
-
-
-void GcInfoDecoder::ReportRegisterToGC( // AMD64
- int regNum,
- BOOL isInterior,
- BOOL isPinned,
- PREGDISPLAY pRD,
- unsigned flags,
- GCEnumCallback pCallBack,
- LPVOID hCallBack)
-{
- GCINFODECODER_CONTRACT(CONTRACTL {
- NOTHROW;
- GC_NOTRIGGER;
- } CONTRACTL_END);
-
- _ASSERTE(regNum >= 0 && regNum <= 16);
- _ASSERTE(regNum != 4); // rsp
-
- LOG((LF_GCROOTS, LL_INFO1000, "Reporting " FMT_REG, regNum ));
-
- OBJECTREF* pObjRef = GetRegisterSlot( regNum, pRD );
-
-#ifdef _DEBUG
- if(IsScratchRegister(regNum, pRD))
- {
- // Scratch registers cannot be reported for non-leaf frames
- _ASSERTE(flags & ActiveStackFrame);
- }
-
- LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
- "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
-
- VALIDATE_ROOT(isInterior, hCallBack, pObjRef);
-
- LOG((LF_GCROOTS, LL_INFO1000, /* Part Three */
- LOG_PIPTR_OBJECT_CLASS(OBJECTREF_TO_UNCHECKED_OBJECTREF(*pObjRef), isPinned, isInterior)));
-#endif //_DEBUG
-
- DWORD gcFlags = CHECK_APP_DOMAIN;
-
- if (isInterior)
- gcFlags |= GC_CALL_INTERIOR;
-
- if (isPinned)
- gcFlags |= GC_CALL_PINNED;
-
- pCallBack(hCallBack, pObjRef, gcFlags);
-}
-
-#else // Unknown platform
-
-OBJECTREF* GcInfoDecoder::GetRegisterSlot(
- int regNum,
- PREGDISPLAY pRD
- )
-{
- PORTABILITY_ASSERT("GcInfoDecoder::GetRegisterSlot");
- return NULL;
-}
-
-bool GcInfoDecoder::IsScratchRegister(int regNum, PREGDISPLAY pRD)
-{
- PORTABILITY_ASSERT("GcInfoDecoder::IsScratchRegister");
- return false;
-}
-
-bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, PREGDISPLAY pRD)
-{
- _ASSERTE( !"NYI" );
- return false;
-}
-
-void GcInfoDecoder::ReportRegisterToGC(
- int regNum,
- BOOL isInterior,
- BOOL isPinned,
- PREGDISPLAY pRD,
- unsigned flags,
- GCEnumCallback pCallBack,
- LPVOID hCallBack)
-{
- _ASSERTE( !"NYI" );
-}
-
-#endif // Unknown platform
-
-
-OBJECTREF* GcInfoDecoder::GetStackSlot(
- INT32 spOffset,
- GcStackSlotBase spBase,
- PREGDISPLAY pRD
- )
-{
- OBJECTREF* pObjRef;
-
- if( GC_SP_REL == spBase )
- {
- pObjRef = (OBJECTREF*) ((SIZE_T)GetRegdisplaySP(pRD) + spOffset);
- }
- else if( GC_CALLER_SP_REL == spBase )
- {
- pObjRef = (OBJECTREF*) (GET_CALLER_SP(pRD) + spOffset);
- }
- else
- {
- _ASSERTE( GC_FRAMEREG_REL == spBase );
- _ASSERTE( NO_STACK_BASE_REGISTER != m_StackBaseRegister );
-
- pObjRef = (OBJECTREF*)((*((INT64*)(GetRegisterSlot( m_StackBaseRegister, pRD )))) + spOffset);
- }
-
- return pObjRef;
-}
-
-void GcInfoDecoder::ReportStackSlotToGC(
- INT32 spOffset,
- GcStackSlotBase spBase,
- BOOL isInterior,
- BOOL isPinned,
- PREGDISPLAY pRD,
- unsigned flags,
- GCEnumCallback pCallBack,
- LPVOID hCallBack)
-{
- GCINFODECODER_CONTRACT(CONTRACTL {
- NOTHROW;
- GC_NOTRIGGER;
- } CONTRACTL_END);
-
- OBJECTREF* pObjRef = GetStackSlot(spOffset, spBase, pRD);
- _ASSERTE( IS_ALIGNED( pObjRef, sizeof( Object* ) ) );
-
-#ifdef _DEBUG
- LOG((LF_GCROOTS, LL_INFO1000, /* Part One */
- "Reporting %s" FMT_STK,
- ( (GC_SP_REL == spBase) ? "" :
- ((GC_CALLER_SP_REL == spBase) ? "caller's " :
- ((GC_FRAMEREG_REL == spBase) ? "frame " : "<unrecognized GcStackSlotBase> "))),
- DBG_STK(spOffset) ));
-
- LOG((LF_GCROOTS, LL_INFO1000, /* Part Two */
- "at" FMT_ADDR "as ", DBG_ADDR(pObjRef) ));
-
- VALIDATE_ROOT(isInterior, hCallBack, pObjRef);
-
- LOG((LF_GCROOTS, LL_INFO1000, /* Part Three */
- LOG_PIPTR_OBJECT_CLASS(OBJECTREF_TO_UNCHECKED_OBJECTREF(*pObjRef), isPinned, isInterior)));
-#endif
-
- DWORD gcFlags = CHECK_APP_DOMAIN;
-
- if (isInterior)
- gcFlags |= GC_CALL_INTERIOR;
-
- if (isPinned)
- gcFlags |= GC_CALL_PINNED;
-
- pCallBack(hCallBack, pObjRef, gcFlags);
-}
-
-}
-
-#endif // USE_GC_INFO_DECODER
-#endif // VERIFY_GCINFO
diff --git a/src/vm/debughelp.cpp b/src/vm/debughelp.cpp
index 7e4455a7ff..df769455aa 100644
--- a/src/vm/debughelp.cpp
+++ b/src/vm/debughelp.cpp
@@ -1198,24 +1198,24 @@ void DumpGCInfo(MethodDesc* method)
_ASSERTE(codeInfo.GetRelOffset() == 0);
ICodeManager* codeMan = codeInfo.GetCodeManager();
- BYTE* table = (BYTE*) codeInfo.GetGCInfo();
+ GCInfoToken table = codeInfo.GetGCInfoToken();
unsigned methodSize = (unsigned)codeMan->GetFunctionSize(table);
- GCDump gcDump;
+ GCDump gcDump(table.Version);
+ PTR_CBYTE gcInfo = PTR_CBYTE(table.Info);
gcDump.gcPrintf = printfToDbgOut;
InfoHdr header;
printfToDbgOut ("Method info block:\n");
-
- table += gcDump.DumpInfoHdr(table, &header, &methodSize, 0);
+ gcInfo += gcDump.DumpInfoHdr(gcInfo, &header, &methodSize, 0);
printfToDbgOut ("\n");
printfToDbgOut ("Pointer table:\n");
- table += gcDump.DumpGCTable(table, header, methodSize, 0);
+ gcInfo += gcDump.DumpGCTable(gcInfo, header, methodSize, 0);
}
void DumpGCInfoMD(size_t method)
diff --git a/src/vm/eedbginterfaceimpl.cpp b/src/vm/eedbginterfaceimpl.cpp
index 53cb288319..93decc9b0d 100644
--- a/src/vm/eedbginterfaceimpl.cpp
+++ b/src/vm/eedbginterfaceimpl.cpp
@@ -665,10 +665,8 @@ size_t EEDbgInterfaceImpl::GetFunctionSize(MethodDesc *pFD)
return 0;
EECodeInfo codeInfo(methodStart);
-
- PTR_VOID methodInfo = codeInfo.GetGCInfo();
-
- return codeInfo.GetCodeManager()->GetFunctionSize(methodInfo);
+ GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
+ return codeInfo.GetCodeManager()->GetFunctionSize(gcInfoToken);
}
#endif //!DACCESS_COMPILE
diff --git a/src/vm/eetwain.cpp b/src/vm/eetwain.cpp
index 69eb177542..82b76f69e0 100644
--- a/src/vm/eetwain.cpp
+++ b/src/vm/eetwain.cpp
@@ -11,8 +11,6 @@
#define RETURN_ADDR_OFFS 1 // in DWORDS
-#include "gcinfo.h"
-
#ifdef USE_GC_INFO_DECODER
#include "gcinfodecoder.h"
#endif
@@ -942,14 +940,14 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx,
// GCInfo for old method
GcInfoDecoder oldGcDecoder(
- dac_cast<PTR_CBYTE>(pOldCodeInfo->GetGCInfo()),
+ pOldCodeInfo->GetGCInfoToken(),
GcInfoDecoderFlags(DECODE_SECURITY_OBJECT | DECODE_PSP_SYM | DECODE_EDIT_AND_CONTINUE),
0 // Instruction offset (not needed)
);
// GCInfo for new method
GcInfoDecoder newGcDecoder(
- dac_cast<PTR_CBYTE>(pNewCodeInfo->GetGCInfo()),
+ pNewCodeInfo->GetGCInfoToken(),
GcInfoDecoderFlags(DECODE_SECURITY_OBJECT | DECODE_PSP_SYM | DECODE_EDIT_AND_CONTINUE),
0 // Instruction offset (not needed)
);
@@ -1437,8 +1435,10 @@ bool EECodeManager::IsGcSafe( EECodeInfo *pCodeInfo,
GC_NOTRIGGER;
} CONTRACTL_END;
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
+
GcInfoDecoder gcInfoDecoder(
- dac_cast<PTR_CBYTE>(pCodeInfo->GetGCInfo()),
+ gcInfoToken,
DECODE_INTERRUPTIBILITY,
dwRelOffset
);
@@ -1502,13 +1502,11 @@ bool FindEndOfLastInterruptibleRegionCB (
*/
unsigned EECodeManager::FindEndOfLastInterruptibleRegion(unsigned curOffset,
unsigned endOffset,
- PTR_VOID methodInfoPtr)
+ GCInfoToken gcInfoToken)
{
#ifndef DACCESS_COMPILE
- BYTE* gcInfoAddr = (BYTE*) methodInfoPtr;
-
GcInfoDecoder gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
DECODE_FOR_RANGES_CALLBACK,
0);
@@ -4758,7 +4756,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD,
methodName, curOffs));
#endif
- PTR_BYTE gcInfoAddr = dac_cast<PTR_BYTE>(pCodeInfo->GetGCInfo());
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
#if defined(STRESS_HEAP) && defined(PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED)
#ifdef USE_GC_INFO_DECODER
@@ -4770,7 +4768,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD,
if (flags & ActiveStackFrame)
{
GcInfoDecoder _gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
DECODE_INTERRUPTIBILITY,
curOffs
);
@@ -4778,7 +4776,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD,
{
// This must be the offset after a call
#ifdef _DEBUG
- GcInfoDecoder _safePointDecoder(gcInfoAddr, (GcInfoDecoderFlags)0, 0);
+ GcInfoDecoder _safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0);
_ASSERTE(_safePointDecoder.IsSafePoint(curOffs));
#endif
flags &= ~((unsigned)ActiveStackFrame);
@@ -4791,7 +4789,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD,
if (flags & ActiveStackFrame)
{
GcInfoDecoder _gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
DECODE_INTERRUPTIBILITY,
curOffs
);
@@ -4839,7 +4837,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD,
// We've been given an override offset for GC Info
#ifdef _DEBUG
GcInfoDecoder _gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
DECODE_CODE_LENGTH,
0
);
@@ -4884,7 +4882,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD,
GcInfoDecoder gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
GcInfoDecoderFlags (DECODE_GC_LIFETIMES | DECODE_SECURITY_OBJECT | DECODE_VARARG),
curOffs
);
@@ -5027,7 +5025,7 @@ OBJECTREF* EECodeManager::GetAddrOfSecurityObject(CrawlFrame *pCF)
unsigned relOffset = pCF->GetRelOffset();
CodeManState* pState = pCF->GetCodeManState();
- PTR_VOID methodInfoPtr = pJitMan->GetGCInfo(methodToken);
+ GCInfoToken gcInfoToken = pJitMan->GetGCInfoToken(methodToken);
_ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
@@ -5035,7 +5033,7 @@ OBJECTREF* EECodeManager::GetAddrOfSecurityObject(CrawlFrame *pCF)
CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
/* Extract the necessary information from the info block header */
- stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr, // <TODO>truncation</TODO>
+ stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(gcInfoToken.Info, // <TODO>truncation</TODO>
relOffset,
&stateBuf->hdrInfoBody);
@@ -5051,10 +5049,8 @@ OBJECTREF* EECodeManager::GetAddrOfSecurityObject(CrawlFrame *pCF)
}
#elif defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE)
- BYTE* gcInfoAddr = (BYTE*) methodInfoPtr;
-
GcInfoDecoder gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
DECODE_SECURITY_OBJECT,
0
);
@@ -5270,11 +5266,10 @@ GenericParamContextType EECodeManager::GetParamContextType(PREGDISPLAY pCont
}
// On x86 the generic param context parameter is never this.
#elif defined(USE_GC_INFO_DECODER)
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
- PTR_CBYTE gcInfoAddr = PTR_CBYTE(methodInfoPtr);
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
GcInfoDecoder gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
GcInfoDecoderFlags (DECODE_GENERICS_INST_CONTEXT),
0
);
@@ -5363,11 +5358,10 @@ PTR_VOID EECodeManager::GetExactGenericsToken(SIZE_T baseStackSlot,
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
- PTR_CBYTE gcInfoAddr = PTR_CBYTE(methodInfoPtr);
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
GcInfoDecoder gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
GcInfoDecoderFlags (DECODE_PSP_SYM | DECODE_GENERICS_INST_CONTEXT),
0
);
@@ -5432,7 +5426,7 @@ void * EECodeManager::GetGSCookieAddr(PREGDISPLAY pContext,
_ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
unsigned relOffset = pCodeInfo->GetRelOffset();
#if defined(_TARGET_X86_)
@@ -5440,7 +5434,7 @@ void * EECodeManager::GetGSCookieAddr(PREGDISPLAY pContext,
/* Extract the necessary information from the info block header */
hdrInfo * info = &stateBuf->hdrInfoBody;
- stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr, // <TODO>truncation</TODO>
+ stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(gcInfoToken.Info, // <TODO>truncation</TODO>
relOffset,
info);
@@ -5459,22 +5453,20 @@ void * EECodeManager::GetGSCookieAddr(PREGDISPLAY pContext,
}
else
{
- PTR_CBYTE table = PTR_CBYTE(methodInfoPtr) + stateBuf->hdrInfoSize;
+ PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info) + stateBuf->hdrInfoSize;
unsigned argSize = GetPushedArgSize(info, table, relOffset);
return PVOID(SIZE_T(pContext->Esp + argSize + info->gsCookieOffset));
}
#elif defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE)
- PTR_CBYTE gcInfoAddr = PTR_CBYTE(methodInfoPtr);
-
if (pCodeInfo->IsFunclet())
{
return NULL;
}
GcInfoDecoder gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
DECODE_GS_COOKIE,
0
);
@@ -5567,7 +5559,7 @@ bool EECodeManager::IsInSynchronizedRegion(
*
* Returns the size of a given function.
*/
-size_t EECodeManager::GetFunctionSize(PTR_VOID methodInfoPtr)
+size_t EECodeManager::GetFunctionSize(GCInfoToken gcInfoToken)
{
CONTRACTL {
NOTHROW;
@@ -5577,16 +5569,15 @@ size_t EECodeManager::GetFunctionSize(PTR_VOID methodInfoPtr)
#if defined(_TARGET_X86_)
hdrInfo info;
+ PTR_VOID methodInfoPtr = gcInfoToken.Info;
crackMethodInfoHdr(methodInfoPtr, 0, &info);
return info.methodSize;
#elif defined(USE_GC_INFO_DECODER)
- PTR_BYTE gcInfoAddr = PTR_BYTE(methodInfoPtr);
-
GcInfoDecoder gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
DECODE_CODE_LENGTH,
0
);
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
index 2dd7c9e2a5..3220cddd8e 100644
--- a/src/vm/gccover.cpp
+++ b/src/vm/gccover.cpp
@@ -79,7 +79,7 @@ void SetupAndSprinkleBreakpoints(
gcCover->methodRegion = methodRegionInfo;
gcCover->codeMan = pCodeInfo->GetCodeManager();
- gcCover->gcInfo = pCodeInfo->GetGCInfo();
+ gcCover->gcInfoToken = pCodeInfo->GetGCInfoToken();
gcCover->callerThread = 0;
gcCover->doingEpilogChecks = true;
@@ -286,7 +286,7 @@ class GCCoverageRangeEnumerator
private:
ICodeManager *m_pCodeManager;
- LPVOID m_pvGCInfo;
+ GCInfoToken m_pvGCTable;
BYTE *m_codeStart;
BYTE *m_codeEnd;
BYTE *m_curFuncletEnd;
@@ -318,7 +318,7 @@ private:
unsigned ofsLastInterruptible = m_pCodeManager->FindEndOfLastInterruptibleRegion(
static_cast<unsigned int>(pCurFunclet - m_codeStart),
static_cast<unsigned int>(m_curFuncletEnd - m_codeStart),
- m_pvGCInfo);
+ m_pvGCTable);
if (ofsLastInterruptible)
{
@@ -332,10 +332,10 @@ private:
public:
- GCCoverageRangeEnumerator (ICodeManager *pCodeManager, LPVOID pvGCInfo, BYTE *codeStart, SIZE_T codeSize)
+ GCCoverageRangeEnumerator (ICodeManager *pCodeManager, GCInfoToken pvGCTable, BYTE *codeStart, SIZE_T codeSize)
{
m_pCodeManager = pCodeManager;
- m_pvGCInfo = pvGCInfo;
+ m_pvGCTable = pvGCTable;
m_codeStart = codeStart;
m_codeEnd = codeStart + codeSize;
m_nextFunclet = codeStart;
@@ -458,9 +458,9 @@ void GCCoverageInfo::SprinkleBreakpoints(
#ifdef _TARGET_AMD64_
- GCCoverageRangeEnumerator rangeEnum(codeMan, gcInfo, codeStart, codeSize);
+ GCCoverageRangeEnumerator rangeEnum(codeMan, gcInfoToken, codeStart, codeSize);
- GcInfoDecoder safePointDecoder((const BYTE*)gcInfo, (GcInfoDecoderFlags)0, 0);
+ GcInfoDecoder safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0);
bool fSawPossibleSwitch = false;
#endif
@@ -582,7 +582,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
#ifdef _TARGET_X86_
// we will whack every instruction in the prolog and epilog to make certain
// our unwinding logic works there.
- if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfo, NULL)) {
+ if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken.Info, NULL)) {
*cur = INTERRUPT_INSTR;
}
#endif
@@ -632,7 +632,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
}
}
- GcInfoDecoder safePointDecoder((const BYTE*)gcInfo, (GcInfoDecoderFlags)0, 0);
+ GcInfoDecoder safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0);
assert(methodRegion.hotSize > 0);
@@ -1469,7 +1469,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
/* are we in a prolog or epilog? If so just test the unwind logic
but don't actually do a GC since the prolog and epilog are not
GC safe points */
- if (gcCover->codeMan->IsInPrologOrEpilog(offset, gcCover->gcInfo, NULL))
+ if (gcCover->codeMan->IsInPrologOrEpilog(offset, gcCover->gcInfoToken.Info, NULL))
{
// We are not at a GC safe point so we can't Suspend EE (Suspend EE will yield to GC).
// But we still have to update the GC Stress instruction. We do it directly without suspending
diff --git a/src/vm/gccover.h b/src/vm/gccover.h
index 0308f473f2..b2dedefa31 100644
--- a/src/vm/gccover.h
+++ b/src/vm/gccover.h
@@ -26,7 +26,7 @@ public:
// Following 6 variables are for prolog / epilog walking coverage
ICodeManager* codeMan; // CodeMan for this method
- void* gcInfo; // gcInfo for this method
+ GCInfoToken gcInfoToken; // gcInfo for this method
Thread* callerThread; // Thread associated with context callerRegs
T_CONTEXT callerRegs; // register state when method was entered
diff --git a/src/vm/gcenv.ee.cpp b/src/vm/gcenv.ee.cpp
index aa1edbb555..5ecae4f8fc 100644
--- a/src/vm/gcenv.ee.cpp
+++ b/src/vm/gcenv.ee.cpp
@@ -132,7 +132,7 @@ inline bool SafeToReportGenericParamContext(CrawlFrame* pCF)
#else // USE_GC_INFO_DECODER
- GcInfoDecoder gcInfoDecoder((PTR_CBYTE)pCF->GetGCInfo(),
+ GcInfoDecoder gcInfoDecoder(pCF->GetGCInfoToken(),
DECODE_PROLOG_LENGTH,
0);
UINT32 prologLength = gcInfoDecoder.GetPrologSize();
@@ -199,8 +199,8 @@ bool FindFirstInterruptiblePointStateCB(
// the end is exclusive). Return -1 if no such point exists.
unsigned FindFirstInterruptiblePoint(CrawlFrame* pCF, unsigned offs, unsigned endOffs)
{
- PTR_BYTE gcInfoAddr = dac_cast<PTR_BYTE>(pCF->GetCodeInfo()->GetGCInfo());
- GcInfoDecoder gcInfoDecoder(gcInfoAddr, DECODE_FOR_RANGES_CALLBACK, 0);
+ GCInfoToken gcInfoToken = pCF->GetGCInfoToken();
+ GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_FOR_RANGES_CALLBACK, 0);
FindFirstInterruptiblePointState state;
state.offs = offs;
@@ -281,9 +281,9 @@ StackWalkAction GcStackCrawlCallBack(CrawlFrame* pCF, VOID* pData)
#if defined(WIN64EXCEPTIONS)
if (pCF->ShouldParentToFuncletUseUnwindTargetLocationForGCReporting())
{
- PTR_BYTE gcInfoAddr = dac_cast<PTR_BYTE>(pCF->GetCodeInfo()->GetGCInfo());
+ GCInfoToken gcInfoToken = pCF->GetGCInfoToken();
GcInfoDecoder _gcInfoDecoder(
- gcInfoAddr,
+ gcInfoToken,
DECODE_CODE_LENGTH,
0
);
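
DECODE_FOR_RANGES_CALLBACK drives EnumerateInterruptibleRanges, which invokes a caller-supplied callback once per range and stops as soon as the callback returns true. A sketch of that pattern in the spirit of FindFirstInterruptiblePointStateCB (the state fields are hypothetical; the callback signature matches the decoder's):

    struct FindFirstInterruptiblePointState
    {
        unsigned offs;       // start of the window being searched
        unsigned endOffs;    // exclusive end of the window
        unsigned returnOffs; // result, if found
    };

    // Returning true stops the enumeration; false continues to the next range.
    static bool FirstPointCB(UINT32 startOffset, UINT32 stopOffset, LPVOID hState)
    {
        FindFirstInterruptiblePointState* pState =
            (FindFirstInterruptiblePointState*)hState;
        if (startOffset < pState->endOffs && stopOffset > pState->offs)
        {
            // The range overlaps the window: the first interruptible point is
            // the later of the window start and the range start.
            pState->returnOffs = (startOffset > pState->offs) ? startOffset
                                                              : pState->offs;
            return true;
        }
        return false;
    }
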
diff --git a/src/vm/gcinfodecoder.cpp b/src/vm/gcinfodecoder.cpp
index 5a3bbd94eb..351e221d82 100644
--- a/src/vm/gcinfodecoder.cpp
+++ b/src/vm/gcinfodecoder.cpp
@@ -6,7 +6,6 @@
#include "common.h"
#include "gcinfodecoder.h"
-
#ifdef USE_GC_INFO_DECODER
#ifndef CHECK_APP_DOMAIN
@@ -84,28 +83,17 @@ bool GcInfoDecoder::SetIsInterruptibleCB (UINT32 startOffset, UINT32 stopOffset,
GcInfoDecoder::GcInfoDecoder(
- PTR_CBYTE gcInfoAddr,
+ GCInfoToken gcInfoToken,
GcInfoDecoderFlags flags,
UINT32 breakOffset
)
- : m_Reader( gcInfoAddr
-#ifdef VERIFY_GCINFO
- + sizeof(size_t)
-#endif
- )
+ : m_Reader(dac_cast<PTR_CBYTE>(gcInfoToken.Info))
, m_InstructionOffset(breakOffset)
, m_IsInterruptible(false)
#ifdef _DEBUG
, m_Flags( flags )
- , m_GcInfoAddress(gcInfoAddr)
-#endif
-#ifdef VERIFY_GCINFO
- , m_DbgDecoder(gcInfoAddr+
- (((UINT32)((PTR_BYTE)(TADDR)gcInfoAddr)[3])<<24)+
- (((UINT32)((PTR_BYTE)(TADDR)gcInfoAddr)[2])<<16)+
- (((UINT32)((PTR_BYTE)(TADDR)gcInfoAddr)[1])<<8)+
- ((PTR_BYTE)(TADDR)gcInfoAddr)[0],
- flags, breakOffset)
+ , m_GcInfoAddress(dac_cast<PTR_CBYTE>(gcInfoToken.Info))
+ , m_Version(gcInfoToken.Version)
#endif
{
_ASSERTE( (flags & (DECODE_INTERRUPTIBILITY | DECODE_GC_LIFETIMES)) || (0 == breakOffset) );
@@ -320,30 +308,6 @@ GcInfoDecoder::GcInfoDecoder(
{
EnumerateInterruptibleRanges(&SetIsInterruptibleCB, this);
}
-
-#ifdef VERIFY_GCINFO
-#if 0
- if(flags & DECODE_INTERRUPTIBILITY)
- _ASSERTE(IsInterruptible() == m_DbgDecoder.IsInterruptible());
-#endif
- if(flags & DECODE_SECURITY_OBJECT)
- _ASSERTE(GetSecurityObjectStackSlot() == m_DbgDecoder.GetSecurityObjectStackSlot());
- if(flags & DECODE_GENERICS_INST_CONTEXT)
- {
- _ASSERTE(GetGenericsInstContextStackSlot() == m_DbgDecoder.GetGenericsInstContextStackSlot());
- _ASSERTE(GetPSPSymStackSlot() == m_DbgDecoder.GetPSPSymStackSlot());
- }
- if(flags & DECODE_VARARG)
- _ASSERTE(GetIsVarArg() == m_DbgDecoder.GetIsVarArg());
- if(flags & DECODE_CODE_LENGTH)
- _ASSERTE(GetCodeLength() == m_DbgDecoder.GetCodeLength());
- _ASSERTE(GetStackBaseRegister() == m_DbgDecoder.GetStackBaseRegister());
- _ASSERTE(GetSizeOfEditAndContinuePreservedArea() == m_DbgDecoder.GetSizeOfEditAndContinuePreservedArea());
-#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
- _ASSERTE(GetSizeOfStackParameterArea() == m_DbgDecoder.GetSizeOfStackParameterArea());
-#endif
-#endif
-
}
bool GcInfoDecoder::IsInterruptible()
@@ -587,16 +551,6 @@ bool GcInfoDecoder::EnumerateLiveSlots(
return true;
}
-#ifdef VERIFY_GCINFO
- m_DbgDecoder.EnumerateLiveSlots(
- pRD,
- reportScratchSlots,
- inputFlags,
- pCallBack,
- hCallBack
- );
-#endif
-
//
// If this is a non-leaf frame and we are executing a call, the unwinder has given us the PC
// of the call instruction. We should adjust it to the PC of the instruction after the call in order to
@@ -1073,13 +1027,6 @@ ReportUntracked:
ExitSuccess:
#endif
-#ifdef VERIFY_GCINFO
-#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
- if(!executionAborted)
-#endif
- m_DbgDecoder.DoFinalVerification();
-#endif
-
return true;
}
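One detail worth calling out in the deletions above: the VERIFY_GCINFO shadow decoder located its side-by-side blob by assembling a 32-bit little-endian value one byte at a time in the constructor's initializer list. A restatement of that removed arithmetic, with an illustrative helper name (ReadLE32 is not in the source):

    static UINT32 ReadLE32(const BYTE* p)
    {
        return ((UINT32)p[3] << 24) | ((UINT32)p[2] << 16)
             | ((UINT32)p[1] << 8)  | ((UINT32)p[0]);
    }
    // m_DbgDecoder was constructed at gcInfoAddr + ReadLE32(gcInfoAddr),
    // i.e. the shadow blob lived at a byte offset stored in the first four
    // bytes of the GC info. All of that bookkeeping goes away with the token.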
diff --git a/src/vm/stackwalk.h b/src/vm/stackwalk.h
index 3d6dbdcb14..004d673a2a 100644
--- a/src/vm/stackwalk.h
+++ b/src/vm/stackwalk.h
@@ -324,6 +324,13 @@ public:
return &codeInfo;
}
+ GCInfoToken GetGCInfoToken()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(isFrameless);
+ return codeInfo.GetGCInfoToken();
+ }
+
PTR_VOID GetGCInfo()
{
LIMITED_METHOD_DAC_CONTRACT;
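For context, a hypothetical caller of the new accessor -- the callback shape mirrors GcStackCrawlCallBack above, but this function itself is not part of the commit:

    StackWalkAction ExampleCrawlCallback(CrawlFrame* pCF, VOID* pData)
    {
        if (pCF->IsFrameless())          // GetGCInfoToken asserts isFrameless
        {
            GCInfoToken token = pCF->GetGCInfoToken();
            GcInfoDecoder decoder(token, DECODE_CODE_LENGTH, 0);
            UINT32 codeLength = decoder.GetCodeLength();
            (void)codeLength;            // use the decoded value here
        }
        return SWA_CONTINUE;
    }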
diff --git a/src/vm/wks/wks.targets b/src/vm/wks/wks.targets
index b9f57e882e..04562365f6 100644
--- a/src/vm/wks/wks.targets
+++ b/src/vm/wks/wks.targets
@@ -251,7 +251,6 @@
<ItemGroup>
<CppCompile Include="$(VmSourcesDir)\AssemblyNativeResource.cpp" />
<CppCompile Include="$(VmSourcesDir)\coverage.cpp" />
- <CppCompile Include="$(VmSourcesDir)\DbgGcInfoDecoder.cpp" />
<CppCompile Include="$(VmSourcesDir)\dwreport.cpp" />
<CppCompile Include="$(VmSourcesDir)\EnCEE.cpp" />
<CppCompile Include="$(VmSourcesDir)\ExceptionHandling.cpp" />
diff --git a/tests/buildtest.cmd b/tests/buildtest.cmd
index 6fcb3e7467..d5f89358ca 100644
--- a/tests/buildtest.cmd
+++ b/tests/buildtest.cmd
@@ -33,6 +33,7 @@ set __BuildSequential=
set __TestPriority=
set __msbuildCleanBuildArgs=
set __verbosity=normal
+set __UpdateInvalidPackagesArg=
REM unprocessedBuildArgs are args that we pass to msbuild (e.g. /p:__BuildArch=x64)
set "__args= %*"
@@ -49,32 +50,34 @@ if /i "%1" == "-h" goto Usage
if /i "%1" == "/help" goto Usage
if /i "%1" == "-help" goto Usage
-if /i "%1" == "x64" (set __BuildArch=x64&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "x86" (set __BuildArch=x86&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "arm" (set __BuildArch=arm&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "arm64" (set __BuildArch=arm64&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "x64" (set __BuildArch=x64&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "x86" (set __BuildArch=x86&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "arm" (set __BuildArch=arm&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "arm64" (set __BuildArch=arm64&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+
+if /i "%1" == "debug" (set __BuildType=Debug&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "release" (set __BuildType=Release&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "checked" (set __BuildType=Checked&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "debug" (set __BuildType=Debug&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "release" (set __BuildType=Release&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "checked" (set __BuildType=Checked&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "clean" (set __CleanBuild=1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "clean" (set __CleanBuild=1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "vs2013" (set __VSVersion=%1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "vs2015" (set __VSVersion=%1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "vs2013" (set __VSVersion=%1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "vs2015" (set __VSVersion=%1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "crossgen" (set __crossgen=true&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "ilasmroundtrip" (set __ILAsmRoundtrip=true&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "sequential" (set __BuildSequential=1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "priority" (set __TestPriority=%2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
-if /i "%1" == "crossgen" (set __crossgen=true&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "ilasmroundtrip" (set __ILAsmRoundtrip=true&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "sequential" (set __BuildSequential=1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "priority" (set __TestPriority=%2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
+if /i "%1" == "verbose" (set __verbosity=detailed&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "verbose" (set __verbosity=detailed&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "skipmanaged" (set __SkipManaged=1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
-if /i "%1" == "skipmanaged" (set __SkipManaged=1&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
+if /i "%1" == "updateinvalidpackages" (set __UpdateInvalidPackagesArg=/t:UpdateInvalidPackageVersions&set processedArgs=!processedArgs! %1&shift&goto Arg_Loop)
@REM It was initially /toolset_dir. Not sure why, since it doesn't match the other usage.
-if /i "%1" == "/toolset_dir" (set __ToolsetDir=%2&set __PassThroughArgs=%__PassThroughArgs% %2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
-if /i "%1" == "toolset_dir" (set __ToolsetDir=%2&set __PassThroughArgs=%__PassThroughArgs% %2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
+if /i "%1" == "/toolset_dir" (set __ToolsetDir=%2&set __PassThroughArgs=%__PassThroughArgs% %2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
+if /i "%1" == "toolset_dir" (set __ToolsetDir=%2&set __PassThroughArgs=%__PassThroughArgs% %2&set processedArgs=!processedArgs! %1 %2&shift&shift&goto Arg_Loop)
if [!processedArgs!]==[] (
call set unprocessedBuildArgs=!__args!
@@ -183,6 +186,8 @@ REM ===
REM =========================================================================================
call %__TestDir%\setup-runtime-dependencies.cmd /arch %__BuildArch% /outputdir %__BinDir%
+if NOT "%__UpdateInvalidPackagesArg%" == "" goto skipnative
+
REM =========================================================================================
REM ===
REM === Native test build section
@@ -237,6 +242,8 @@ if errorlevel 1 exit /b 1
REM endlocal to rid us of environment changes from vcvarsall.bat
endlocal
+:skipnative
+
if defined __SkipManaged exit /b 0
REM =========================================================================================
@@ -272,7 +279,7 @@ if defined __TestPriority (
)
set __BuildLogRootName=Tests_Managed
-call :msbuild "%__ProjectFilesDir%\build.proj" %__msbuildManagedBuildArgs%
+call :msbuild "%__ProjectFilesDir%\build.proj" %__msbuildManagedBuildArgs% %__UpdateInvalidPackagesArg%
if errorlevel 1 exit /b 1
set CORE_ROOT=%__TestBinDir%\Tests\Core_Root
@@ -359,6 +366,7 @@ echo 666: Build all tests with priority 0, 1 ... 666
echo sequential: force a non-parallel build ^(default is to build in parallel
echo using all processors^).
echo IlasmRoundTrip: enables ilasm round trip build and run of the tests before executing them.
+echo updateinvalidpackages: enables updating package versions in all test project.json files
echo verbose: enables detailed file logging for the msbuild tasks into the msbuild log file.
exit /b 1
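The Arg_Loop idiom above -- consume one token per pass, append it to processedArgs, and forward anything unrecognized to msbuild -- is easier to read outside batch syntax. A rough C++ analog for illustration only (names are invented here; batch's /i case folding is elided):

    #include <string>
    #include <vector>

    struct Options
    {
        std::string buildArch = "x64";
        bool updateInvalidPackages = false;        // new option in this change
        std::vector<std::string> unprocessed;      // handed to msbuild as-is
    };

    Options ParseArgs(int argc, char** argv)
    {
        Options opts;
        for (int i = 1; i < argc; ++i)
        {
            std::string arg = argv[i];
            if (arg == "x64" || arg == "x86" || arg == "arm" || arg == "arm64")
                opts.buildArch = arg;              // like: set __BuildArch=...
            else if (arg == "updateinvalidpackages")
                opts.updateInvalidPackages = true; // like: /t:UpdateInvalidPackageVersions
            else
                opts.unprocessed.push_back(arg);   // like: unprocessedBuildArgs
        }
        return opts;
    }

Note that when the new flag is set, the script also jumps over the native test build (the skipnative label) and tacks the extra msbuild target onto the managed build invocation.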
diff --git a/tests/dir.props b/tests/dir.props
index 870d4e39be..cf03387845 100644
--- a/tests/dir.props
+++ b/tests/dir.props
@@ -42,20 +42,28 @@
<PropertyGroup>
<ValidatePackageVersions>true</ValidatePackageVersions>
<ProhibitFloatingDependencies>true</ProhibitFloatingDependencies>
+
+ <CoreFxExpectedPrerelease>rc3-24117-00</CoreFxExpectedPrerelease>
+ <CoreClrPackageVersion>beta-24314-02</CoreClrPackageVersion>
+
+ <CoreFxVersionsIdentityRegex>^(?i)((System\..*)|(Microsoft\.CSharp)|(Microsoft\.NETCore.*)|(Microsoft\.Win32\..*)|(Microsoft\.VisualBasic))(?&lt;!TestData)$</CoreFxVersionsIdentityRegex>
</PropertyGroup>
<ItemGroup>
- <ValidationPattern Include="^(?i)((System\..%2A)|(Microsoft\.CSharp)|(Microsoft\.NETCore.%2A)|(Microsoft\.Win32\..%2A)|(Microsoft\.VisualBasic))(?&lt;!TestData)$">
- <ExpectedPrerelease>rc3-24117-00</ExpectedPrerelease>
+ <ValidationPattern Include="CoreFxVersions">
+ <IdentityRegex>$(CoreFxVersionsIdentityRegex)</IdentityRegex>
+ <ExpectedPrerelease>$(CoreFxExpectedPrerelease)</ExpectedPrerelease>
</ValidationPattern>
- <ValidationPattern Include="^(?i)(xunit(\.assert|\.core|\.runner\.(utility|msbuild))?)$">
+ <ValidationPattern Include="XunitPackageVersions">
+ <IdentityRegex>^(?i)(xunit(\.assert|\.core|\.runner\.(utility|msbuild))?)$</IdentityRegex>
<ExpectedVersion>$(XunitPackageVersion)</ExpectedVersion>
</ValidationPattern>
- <!-- Add a dummy value so that the item isn't removed by msbuild. Without the | this item doesn't show up later. -->
- <ValidationPattern Include="^(?i)(xunit\.console\.netcore|dummy value)$">
+ <ValidationPattern Include="XunitConsoleVersion">
+ <IdentityRegex>^(?i)(xunit\.console\.netcore)$</IdentityRegex>
<ExpectedVersion>1.0.2-prerelease-00101</ExpectedVersion>
</ValidationPattern>
- <ValidationPattern Include="^(?i)Microsoft\.DotNet\.xunit\.performance.%2A$">
+ <ValidationPattern Include="XunitPerformanceVersion">
+ <IdentityRegex>^(?i)Microsoft\.DotNet\.xunit\.performance.*$</IdentityRegex>
<ExpectedVersion>1.0.0-alpha-build0035</ExpectedVersion>
</ValidationPattern>
</ItemGroup>
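Each ValidationPattern item above pairs an identity regex with an expected version or prerelease label; the refactor just lifts the regexes and versions into named properties so they can be shared. A hedged C++ sketch of the CoreFx identity check those items encode -- std::regex has no lookbehind, so the (?&lt;!TestData) exclusion is modeled as a suffix test:

    #include <regex>
    #include <string>

    bool MatchesCoreFxIdentity(const std::string& id)
    {
        static const std::regex pattern(
            R"(^(System\..*|Microsoft\.CSharp|Microsoft\.NETCore.*|Microsoft\.Win32\..*|Microsoft\.VisualBasic)$)",
            std::regex::icase);
        if (!std::regex_match(id, pattern))
            return false;
        // Stand-in for the (?<!TestData)$ negative lookbehind.
        return id.size() < 8 || id.compare(id.size() - 8, 8, "TestData") != 0;
    }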
diff --git a/tests/runtest.proj b/tests/runtest.proj
index ecca866cbf..aafa025b7a 100644
--- a/tests/runtest.proj
+++ b/tests/runtest.proj
@@ -362,12 +362,12 @@ namespace $([System.String]::Copy($(Category)).Replace(".","_").Replace("\","").
<PropertyGroup>
<CoreclrPackageFileName>%(CoreclrPackage.Filename)</CoreclrPackageFileName>
<!-- Get package version number from nuget package filename at core_root -->
- <CoreclrPackageVersion>$([System.String]::Copy('$(CoreclrPackageFileName)').Replace('Microsoft.NETCore.Runtime.CoreCLR.',''))</CoreclrPackageVersion>
+ <CoreClrPackageVersion Condition="'$(BuildTestsAgainstPackages)'!='true'">$([System.String]::Copy('$(CoreclrPackageFileName)').Replace('Microsoft.NETCore.Runtime.CoreCLR.',''))</CoreClrPackageVersion>
<TestRuntimeJsonContents>
<![CDATA[
{
"dependencies": {
- "Microsoft.NETCore.Runtime.CoreCLR": "$(CoreclrPackageVersion)",
+ "Microsoft.NETCore.Runtime.CoreCLR": "$(CoreClrPackageVersion)",
"Microsoft.NETCore.TestHost": "1.0.0-rc3-24117-00"
},
"frameworks": {
diff --git a/tests/scripts/arm32_ci_script.sh b/tests/scripts/arm32_ci_script.sh
index 223da2e26c..e3881274f0 100755
--- a/tests/scripts/arm32_ci_script.sh
+++ b/tests/scripts/arm32_ci_script.sh
@@ -102,7 +102,15 @@ function check_git_head {
function unmount_rootfs {
local rootfsFolder="$1"
- if grep -qs "$rootfsFolder" /proc/mounts; then
+ #Check if there are any open files in this directory.
+ if [ -d $rootfsFolder ]; then
+ #If we find information about the file
+ if sudo lsof +D $rootfsFolder; then
+ (set +x; echo 'See above for lsof information. Continuing with the build.')
+ fi
+ fi
+
+ if mountpoint -q -- "$rootfsFolder"; then
sudo umount "$rootfsFolder"
fi
}