summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--src/ToolBox/SOS/Strike/eeheap.cpp9
-rw-r--r--src/ToolBox/SOS/Strike/sos.def18
-rw-r--r--src/ToolBox/SOS/Strike/strike.cpp296
-rw-r--r--src/classlibnative/bcltype/system.cpp2
-rw-r--r--src/debug/daccess/daccess.cpp19
-rw-r--r--src/debug/daccess/dacimpl.h9
-rw-r--r--src/debug/daccess/enummem.cpp7
-rw-r--r--src/debug/daccess/request.cpp87
-rw-r--r--src/debug/daccess/request_svr.cpp25
-rw-r--r--src/debug/ee/controller.cpp42
-rw-r--r--src/debug/ee/controller.h6
-rw-r--r--src/debug/ee/functioninfo.cpp1
-rw-r--r--src/gc/gc.cpp739
-rw-r--r--src/gc/gc.h25
-rw-r--r--src/gc/gccommon.cpp11
-rw-r--r--src/gc/gcpriv.h120
-rw-r--r--src/gc/gcrecord.h131
-rw-r--r--src/gc/handletable.cpp51
-rw-r--r--src/gc/handletablescan.cpp59
-rw-r--r--src/gc/objecthandle.cpp1
-rw-r--r--src/inc/clrconfigvalues.h7
-rw-r--r--src/inc/corerror.xml5
-rw-r--r--src/inc/corprof.idl83
-rw-r--r--src/inc/dacprivate.h55
-rw-r--r--src/inc/dacvars.h8
-rw-r--r--src/inc/profilepriv.inl15
-rw-r--r--src/inc/simplerhash.h47
-rw-r--r--src/inc/sospriv.idl12
-rw-r--r--src/inc/switches.h2
-rw-r--r--src/md/winmd/inc/adapter.h4
-rw-r--r--src/mscorlib/src/System/AppContext/AppContext.cs4
-rw-r--r--src/mscorlib/src/System/Collections/Concurrent/ConcurrentDictionary.cs5
-rw-r--r--src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs16
-rw-r--r--src/mscorlib/src/System/Globalization/CultureData.cs33
-rw-r--r--src/mscorlib/src/mscorlib.txt2
-rw-r--r--src/pal/prebuilt/idl/clrinternal_i.c12
-rw-r--r--src/pal/prebuilt/idl/corprof_i.c6
-rw-r--r--src/pal/prebuilt/inc/clretwall.h66
-rw-r--r--src/pal/prebuilt/inc/clretwallmain.h2
-rw-r--r--src/pal/prebuilt/inc/corerror.h1
-rw-r--r--src/pal/prebuilt/inc/corprof.h1646
-rw-r--r--src/pal/prebuilt/inc/sospriv.h109
-rw-r--r--src/vm/ceeload.cpp41
-rw-r--r--src/vm/ceeload.h2
-rw-r--r--src/vm/codeman.cpp204
-rw-r--r--src/vm/codeman.h20
-rw-r--r--src/vm/compile.cpp705
-rw-r--r--src/vm/debugdebugger.h9
-rw-r--r--src/vm/eepolicy.cpp2
-rw-r--r--src/vm/eetoprofinterfaceimpl.cpp106
-rw-r--r--src/vm/eetoprofinterfaceimpl.h7
-rw-r--r--src/vm/eetoprofinterfaceimpl.inl6
-rw-r--r--src/vm/eventreporter.cpp102
-rw-r--r--src/vm/i386/stublinkerx86.cpp43
-rw-r--r--src/vm/i386/stublinkerx86.h20
-rw-r--r--src/vm/pefile.cpp11
-rw-r--r--src/vm/proftoeeinterfaceimpl.cpp185
-rw-r--r--src/vm/proftoeeinterfaceimpl.h26
-rw-r--r--src/vm/threaddebugblockinginfo.cpp7
59 files changed, 4724 insertions, 570 deletions
diff --git a/src/ToolBox/SOS/Strike/eeheap.cpp b/src/ToolBox/SOS/Strike/eeheap.cpp
index 4b4fa1716e..d9484d2a0a 100644
--- a/src/ToolBox/SOS/Strike/eeheap.cpp
+++ b/src/ToolBox/SOS/Strike/eeheap.cpp
@@ -537,11 +537,11 @@ void GCPrintLargeHeapSegmentInfo(const DacpGcHeapDetails &heap, DWORD_PTR &total
void GCHeapInfo(const DacpGcHeapDetails &heap, DWORD_PTR &total_size)
{
GCPrintGenerationInfo(heap);
- ExtOut(WIN64_8SPACES " segment " WIN64_8SPACES " begin " WIN64_8SPACES "allocated size\n");
+ ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n", "segment", "begin", "allocated", "size");
GCPrintSegmentInfo(heap, total_size);
ExtOut("Large object heap starts at 0x%p\n",
(ULONG64)heap.generation_table[GetMaxGeneration()+1].allocation_start);
- ExtOut(WIN64_8SPACES " segment " WIN64_8SPACES " begin " WIN64_8SPACES "allocated size\n");
+ ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n", "segment", "begin", "allocated", "size");
GCPrintLargeHeapSegmentInfo(heap,total_size);
}
@@ -758,7 +758,7 @@ void GCGenUsageStats(TADDR start, TADDR end, const std::unordered_set<TADDR> &li
{
genUsage->freed += objSize;
}
- else if (liveObjs.find(taddrObj) == liveObjs.end())
+ else if (!(liveObjs.empty()) && liveObjs.find(taddrObj) == liveObjs.end())
{
genUsage->unrooted += objSize;
}
@@ -782,7 +782,8 @@ BOOL GCHeapUsageStats(const DacpGcHeapDetails& heap, BOOL bIncUnreachable, HeapU
#ifndef FEATURE_PAL
// this will create the bitmap of rooted objects only if bIncUnreachable is true
GCRootImpl gcroot;
- const std::unordered_set<TADDR> &liveObjs = gcroot.GetLiveObjects();
+ std::unordered_set<TADDR> emptyLiveObjs;
+ const std::unordered_set<TADDR> &liveObjs = (bIncUnreachable ? gcroot.GetLiveObjects() : emptyLiveObjs);
// 1a. enumerate all non-ephemeral segments
while (taddrSeg != (TADDR)heap.generation_table[0].start_segment)
diff --git a/src/ToolBox/SOS/Strike/sos.def b/src/ToolBox/SOS/Strike/sos.def
index e7a7ff9c7a..94dd351a28 100644
--- a/src/ToolBox/SOS/Strike/sos.def
+++ b/src/ToolBox/SOS/Strike/sos.def
@@ -21,7 +21,15 @@ EXPORTS
dumpdomain=DumpDomain
#ifdef TRACE_GC
DumpGCLog
-#endif
+ dumpgclog=DumpGCLog
+ dlog=DumpGCLog
+#endif
+ DumpGCData
+ dumpgcdata=DumpGCData
+ dgc=DumpGCData
+ DumpGCConfigLog
+ dumpgcconfiglog=DumpGCConfigLog
+ dclog=DumpGCConfigLog
DumpHeap
dumpheap=DumpHeap
DumpIL
@@ -168,6 +176,10 @@ EXPORTS
filthint
#endif
+#ifdef FEATURE_PAL
+ SetClrDebugDll
+ UnloadClrDebugDll
+#else
_EFN_GetManagedExcepStack
_EFN_GetManagedExcepStackW
_EFN_GetManagedObjectFieldInfo
@@ -194,6 +206,8 @@ EXPORTS
procinfo=ProcInfo
VerifyStackTrace
WatsonBuckets
+#endif // !FEATURE_PAL
+
#ifdef FEATURE_CORESYSTEM
// Only documented for Apollo internal usage
@@ -214,4 +228,4 @@ EXPORTS
getCodeTypeFlags=GetCodeTypeFlags
TraceToCode
tracetocode=TraceToCode
-#endif \ No newline at end of file
+#endif
diff --git a/src/ToolBox/SOS/Strike/strike.cpp b/src/ToolBox/SOS/Strike/strike.cpp
index 2434345d3c..730850eeca 100644
--- a/src/ToolBox/SOS/Strike/strike.cpp
+++ b/src/ToolBox/SOS/Strike/strike.cpp
@@ -2083,7 +2083,7 @@ struct StackTraceElement
#if defined(FEATURE_EXCEPTIONDISPATCHINFO)
// TRUE if this element represents the last frame of the foreign
// exception stack trace.
- BOOL fIsLastFrameFromForeignStackTrace;
+ BOOL fIsLastFrameFromForeignStackTrace;
#endif // defined(FEATURE_EXCEPTIONDISPATCHINFO)
};
@@ -8431,7 +8431,6 @@ DECLARE_API (DumpGCLog)
INIT_API_NODAC();
MINIDUMP_NOT_SUPPORTED();
-
if (GetEEFlavor() == UNKNOWNEE)
{
ExtOut("CLR not loaded\n");
@@ -8446,7 +8445,6 @@ DECLARE_API (DumpGCLog)
if (*args != 0)
fileName = args;
- // Try to find stress log symbols
DWORD_PTR dwAddr = GetValueFromExpression(MAIN_CLR_MODULE_NAME_A "!SVR::gc_log_buffer");
moveN (dwAddr, dwAddr);
@@ -8460,7 +8458,7 @@ DECLARE_API (DumpGCLog)
return E_FAIL;
}
}
-
+
ExtOut("Dumping GC log at %08x\n", dwAddr);
g_bDacBroken = FALSE;
@@ -8511,6 +8509,7 @@ DECLARE_API (DumpGCLog)
DWORD dwWritten = 0;
WriteFile (hGCLog, bGCLog, iRealLogSize + 1, &dwWritten, NULL);
+
Status = S_OK;
exit:
@@ -8529,9 +8528,296 @@ exit:
return Status;
}
-
#endif //TRACE_GC
+DECLARE_API (DumpGCConfigLog)
+{
+ INIT_API();
+#ifdef GC_CONFIG_DRIVEN
+ MINIDUMP_NOT_SUPPORTED();
+
+ if (GetEEFlavor() == UNKNOWNEE)
+ {
+ ExtOut("CLR not loaded\n");
+ return Status;
+ }
+
+ const char* fileName = "GCConfigLog.txt";
+
+ while (isspace (*args))
+ args ++;
+
+ if (*args != 0)
+ fileName = args;
+
+ if (!InitializeHeapData ())
+ {
+ ExtOut("GC Heap not initialized yet.\n");
+ return S_OK;
+ }
+
+ BOOL fIsServerGC = IsServerBuild();
+
+ DWORD_PTR dwAddr = 0;
+ DWORD_PTR dwAddrOffset = 0;
+
+ if (fIsServerGC)
+ {
+ dwAddr = GetValueFromExpression(MAIN_CLR_MODULE_NAME_A "!SVR::gc_config_log_buffer");
+ dwAddrOffset = GetValueFromExpression(MAIN_CLR_MODULE_NAME_A "!SVR::gc_config_log_buffer_offset");
+ }
+ else
+ {
+ dwAddr = GetValueFromExpression(MAIN_CLR_MODULE_NAME_A "!WKS::gc_config_log_buffer");
+ dwAddrOffset = GetValueFromExpression(MAIN_CLR_MODULE_NAME_A "!WKS::gc_config_log_buffer_offset");
+ }
+
+ moveN (dwAddr, dwAddr);
+ moveN (dwAddrOffset, dwAddrOffset);
+
+ if (dwAddr == 0)
+ {
+ ExtOut("Can't get either WKS or SVR GC's config log buffer");
+ return E_FAIL;
+ }
+
+ ExtOut("Dumping GC log at %08x\n", dwAddr);
+
+ g_bDacBroken = FALSE;
+
+ ExtOut("Attempting to dump GC log to file '%s'\n", fileName);
+
+ Status = E_FAIL;
+
+ HANDLE hGCLog = CreateFileA(
+ fileName,
+ GENERIC_WRITE,
+ FILE_SHARE_READ,
+ NULL,
+ OPEN_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+
+ if (hGCLog == INVALID_HANDLE_VALUE)
+ {
+ ExtOut("failed to create file: %d\n", GetLastError());
+ goto exit;
+ }
+
+ int iLogSize = (int)dwAddrOffset;
+
+ BYTE* bGCLog = new NOTHROW BYTE[iLogSize];
+ if (bGCLog == NULL)
+ {
+ ReportOOM();
+ goto exit;
+ }
+
+ memset (bGCLog, 0, iLogSize);
+ if (!SafeReadMemory(dwAddr, bGCLog, iLogSize, NULL))
+ {
+ ExtOut("failed to read memory from %08x\n", dwAddr);
+ }
+
+ SetFilePointer (hGCLog, 0, 0, FILE_END);
+ DWORD dwWritten = 0;
+ WriteFile (hGCLog, bGCLog, iLogSize, &dwWritten, NULL);
+
+ Status = S_OK;
+
+exit:
+
+ if (hGCLog != INVALID_HANDLE_VALUE)
+ {
+ CloseHandle (hGCLog);
+ }
+
+ if (Status == S_OK)
+ ExtOut("SUCCESS: Stress log dumped\n");
+ else if (Status == S_FALSE)
+ ExtOut("No Stress log in the image, no file written\n");
+ else
+ ExtOut("FAILURE: Stress log not dumped\n");
+
+ return Status;
+#else
+ ExtOut("Not implemented\n");
+ return S_OK;
+#endif //GC_CONFIG_DRIVEN
+}
+
+#ifdef GC_CONFIG_DRIVEN
+static const char * const str_interesting_data_points[] =
+{
+ "pre short", // 0
+ "post short", // 1
+ "merged pins", // 2
+ "converted pins", // 3
+ "pre pin", // 4
+ "post pin", // 5
+ "pre and post pin", // 6
+ "pre short padded", // 7
+    "post short padded", // 8
+};
+
+static char* str_heap_compact_reasons[] =
+{
+ "low on ephemeral space",
+ "high fragmetation",
+ "couldn't allocate gaps",
+ "user specfied compact LOH",
+ "last GC before OOM",
+ "induced compacting GC",
+ "fragmented gen0 (ephemeral GC)",
+ "high memory load (ephemeral GC)",
+ "high memory load and frag",
+ "very high memory load and frag",
+ "no gc mode"
+};
+
+static BOOL gc_heap_compact_reason_mandatory_p[] =
+{
+ TRUE, //compact_low_ephemeral = 0,
+ FALSE, //compact_high_frag = 1,
+ TRUE, //compact_no_gaps = 2,
+ TRUE, //compact_loh_forced = 3,
+ TRUE, //compact_last_gc = 4
+ TRUE, //compact_induced_compacting = 5,
+ FALSE, //compact_fragmented_gen0 = 6,
+ FALSE, //compact_high_mem_load = 7,
+ TRUE, //compact_high_mem_frag = 8,
+ TRUE, //compact_vhigh_mem_frag = 9,
+ TRUE //compact_no_gc_mode = 10
+};
+
+static char* str_heap_expand_mechanisms[] =
+{
+ "reused seg with normal fit",
+ "reused seg with best fit",
+ "expand promoting eph",
+ "expand with a new seg",
+ "no memory for a new seg",
+ "expand in next full GC"
+};
+
+static char* str_bit_mechanisms[] =
+{
+ "using mark list",
+ "demotion"
+};
+
+static const char * const str_gc_global_mechanisms[] =
+{
+ "concurrent GCs",
+ "compacting GCs",
+ "promoting GCs",
+ "GCs that did demotion",
+ "card bundles",
+ "elevation logic"
+};
+
+void PrintInterestingGCInfo(DacpGCInterestingInfoData* dataPerHeap)
+{
+ ExtOut("Interesting data points\n");
+ size_t* data = dataPerHeap->interestingDataPoints;
+ for (int i = 0; i < NUM_GC_DATA_POINTS; i++)
+ {
+ ExtOut("%20s: %d\n", str_interesting_data_points[i], data[i]);
+ }
+
+ ExtOut("\nCompacting reasons\n");
+ data = dataPerHeap->compactReasons;
+ for (int i = 0; i < MAX_COMPACT_REASONS_COUNT; i++)
+ {
+ ExtOut("[%s]%35s: %d\n", (gc_heap_compact_reason_mandatory_p[i] ? "M" : "W"), str_heap_compact_reasons[i], data[i]);
+ }
+
+ ExtOut("\nExpansion mechanisms\n");
+ data = dataPerHeap->expandMechanisms;
+ for (int i = 0; i < MAX_EXPAND_MECHANISMS_COUNT; i++)
+ {
+ ExtOut("%30s: %d\n", str_heap_expand_mechanisms[i], data[i]);
+ }
+
+ ExtOut("\nOther mechanisms enabled\n");
+ data = dataPerHeap->bitMechanisms;
+ for (int i = 0; i < MAX_GC_MECHANISM_BITS_COUNT; i++)
+ {
+ ExtOut("%20s: %d\n", str_bit_mechanisms[i], data[i]);
+ }
+}
+#endif //GC_CONFIG_DRIVEN
+
+DECLARE_API(DumpGCData)
+{
+ INIT_API();
+
+#ifdef GC_CONFIG_DRIVEN
+ MINIDUMP_NOT_SUPPORTED();
+
+ if (!InitializeHeapData ())
+ {
+ ExtOut("GC Heap not initialized yet.\n");
+ return S_OK;
+ }
+
+ DacpGCInterestingInfoData interestingInfo;
+ interestingInfo.RequestGlobal(g_sos);
+ for (int i = 0; i < MAX_GLOBAL_GC_MECHANISMS_COUNT; i++)
+ {
+ ExtOut("%-30s: %d\n", str_gc_global_mechanisms[i], interestingInfo.globalMechanisms[i]);
+ }
+
+ ExtOut("\n[info per heap]\n");
+
+ if (!IsServerBuild())
+ {
+ if (interestingInfo.Request(g_sos) != S_OK)
+ {
+ ExtOut("Error requesting interesting GC info\n");
+ return E_FAIL;
+ }
+
+ PrintInterestingGCInfo(&interestingInfo);
+ }
+ else
+ {
+ DWORD dwNHeaps = GetGcHeapCount();
+ DWORD dwAllocSize;
+ if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
+ {
+ ExtOut("Failed to get GCHeaps: integer overflow\n");
+ return Status;
+ }
+
+ CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
+ if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
+ {
+ ExtOut("Failed to get GCHeaps\n");
+ return Status;
+ }
+
+ for (DWORD n = 0; n < dwNHeaps; n ++)
+ {
+ if (interestingInfo.Request(g_sos, heapAddrs[n]) != S_OK)
+ {
+ ExtOut("Heap %d: Error requesting interesting GC info\n", n);
+ return E_FAIL;
+ }
+
+ ExtOut("--------info for heap %d--------\n", n);
+ PrintInterestingGCInfo(&interestingInfo);
+ ExtOut("\n");
+ }
+ }
+
+ return S_OK;
+#else
+ ExtOut("Not implemented\n");
+ return S_OK;
+#endif //GC_CONFIG_DRIVEN
+}
+
#ifndef FEATURE_PAL
/**********************************************************************\
* Routine Description: *
diff --git a/src/classlibnative/bcltype/system.cpp b/src/classlibnative/bcltype/system.cpp
index 3f27e07bd7..b198dd24a0 100644
--- a/src/classlibnative/bcltype/system.cpp
+++ b/src/classlibnative/bcltype/system.cpp
@@ -588,12 +588,10 @@ void SystemNative::GenericFailFast(STRINGREF refMesgString, EXCEPTIONREF refExce
}
#endif // !FEATURE_PAL
-#ifdef FEATURE_WINDOWSPHONE
// stash the user-provided exception object. this will be used as
// the inner exception object to the FatalExecutionEngineException.
if (gc.refExceptionForWatsonBucketing != NULL)
pThread->SetLastThrownObject(gc.refExceptionForWatsonBucketing);
-#endif // FEATURE_WINDOWSPHONE
EEPolicy::HandleFatalError(exitCode, retAddress, pszMessage);
diff --git a/src/debug/daccess/daccess.cpp b/src/debug/daccess/daccess.cpp
index ff3a8172ad..829ddd904f 100644
--- a/src/debug/daccess/daccess.cpp
+++ b/src/debug/daccess/daccess.cpp
@@ -3267,6 +3267,10 @@ ClrDataAccess::QueryInterface(THIS_
{
ifaceRet = static_cast<ISOSDacInterface2*>(this);
}
+ else if (IsEqualIID(interfaceId, __uuidof(ISOSDacInterface3)))
+ {
+ ifaceRet = static_cast<ISOSDacInterface3*>(this);
+ }
else
{
*iface = NULL;
@@ -6849,8 +6853,19 @@ ClrDataAccess::GetMDImport(const PEFile* peFile, const ReflectionModule* reflect
{
// Get the metadata
PTR_SBuffer metadataBuffer = reflectionModule->GetDynamicMetadataBuffer();
- mdBaseTarget = dac_cast<PTR_CVOID>((metadataBuffer->DacGetRawBuffer()).StartAddress());
- mdSize = metadataBuffer->GetSize();
+ if (metadataBuffer != PTR_NULL)
+ {
+ mdBaseTarget = dac_cast<PTR_CVOID>((metadataBuffer->DacGetRawBuffer()).StartAddress());
+ mdSize = metadataBuffer->GetSize();
+ }
+ else
+ {
+ if (throwEx)
+ {
+ DacError(E_FAIL);
+ }
+ return NULL;
+ }
}
else
{
diff --git a/src/debug/daccess/dacimpl.h b/src/debug/daccess/dacimpl.h
index f33b7fc42b..d20bedf1a4 100644
--- a/src/debug/daccess/dacimpl.h
+++ b/src/debug/daccess/dacimpl.h
@@ -857,7 +857,8 @@ class ClrDataAccess
: public IXCLRDataProcess2,
public ICLRDataEnumMemoryRegions,
public ISOSDacInterface,
- public ISOSDacInterface2
+ public ISOSDacInterface2,
+ public ISOSDacInterface3
{
public:
ClrDataAccess(ICorDebugDataTarget * pTarget, ICLRDataTarget * pLegacyTarget=0);
@@ -1192,6 +1193,11 @@ public:
virtual HRESULT STDMETHODCALLTYPE GetObjectExceptionData(CLRDATA_ADDRESS objAddr, struct DacpExceptionObjectData *data);
virtual HRESULT STDMETHODCALLTYPE IsRCWDCOMProxy(CLRDATA_ADDRESS rcwAddr, BOOL* isDCOMProxy);
+ // ISOSDacInterface3
+ virtual HRESULT STDMETHODCALLTYPE GetGCInterestingInfoData(CLRDATA_ADDRESS interestingInfoAddr, struct DacpGCInterestingInfoData *data);
+ virtual HRESULT STDMETHODCALLTYPE GetGCInterestingInfoStaticData(struct DacpGCInterestingInfoData *data);
+ virtual HRESULT STDMETHODCALLTYPE GetGCGlobalMechanisms(size_t* globalMechanisms);
+
//
// ClrDataAccess.
//
@@ -1271,6 +1277,7 @@ public:
DacpGcHeapDetails *detailsData);
HRESULT GetServerAllocData(unsigned int count, struct DacpGenerationAllocData *data, unsigned int *pNeeded);
HRESULT ServerOomData(CLRDATA_ADDRESS addr, DacpOomData *oomData);
+ HRESULT ServerGCInterestingInfoData(CLRDATA_ADDRESS addr, DacpGCInterestingInfoData *interestingInfoData);
HRESULT ServerGCHeapAnalyzeData(CLRDATA_ADDRESS heapAddr,
DacpGcHeapAnalyzeData *analyzeData);
diff --git a/src/debug/daccess/enummem.cpp b/src/debug/daccess/enummem.cpp
index 0d50ec6767..b65d086b55 100644
--- a/src/debug/daccess/enummem.cpp
+++ b/src/debug/daccess/enummem.cpp
@@ -802,6 +802,7 @@ HRESULT ClrDataAccess::EnumMemWalkStackHelper(CLRDataEnumMemoryFlags flags,
{
frameHadContext = false;
status = pStackWalk->GetFrame(&pFrame);
+ PCODE addr = NULL;
if (status == S_OK && pFrame != NULL)
{
// write out the code that ip pointed to
@@ -812,7 +813,8 @@ HRESULT ClrDataAccess::EnumMemWalkStackHelper(CLRDataEnumMemoryFlags flags,
{
// Enumerate the code around the call site to help debugger stack walking heuristics
::FillRegDisplay(&regDisp, &context);
- TADDR callEnd = PCODEToPINSTR(GetControlPC(&regDisp));
+ addr = GetControlPC(&regDisp);
+ TADDR callEnd = PCODEToPINSTR(addr);
DacEnumCodeForStackwalk(callEnd);
frameHadContext = true;
}
@@ -969,8 +971,7 @@ HRESULT ClrDataAccess::EnumMemWalkStackHelper(CLRDataEnumMemoryFlags flags,
DebugInfoManager::EnumMemoryRegionsForMethodDebugInfo(flags, pMethodDesc);
#ifdef WIN64EXCEPTIONS
- PCODE addr = pMethodDesc->GetNativeCode();
-
+
if (addr != NULL)
{
EECodeInfo codeInfo(addr);
diff --git a/src/debug/daccess/request.cpp b/src/debug/daccess/request.cpp
index af5848288a..1925465c92 100644
--- a/src/debug/daccess/request.cpp
+++ b/src/debug/daccess/request.cpp
@@ -3060,6 +3060,89 @@ ClrDataAccess::GetOOMData(CLRDATA_ADDRESS oomAddr, struct DacpOomData *data)
}
HRESULT
+ClrDataAccess::GetGCGlobalMechanisms(size_t* globalMechanisms)
+{
+#ifdef GC_CONFIG_DRIVEN
+ if (globalMechanisms == NULL)
+ return E_INVALIDARG;
+
+ SOSDacEnter();
+ memset(globalMechanisms, 0, (sizeof(size_t) * MAX_GLOBAL_GC_MECHANISMS_COUNT));
+
+ for (int i = 0; i < MAX_GLOBAL_GC_MECHANISMS_COUNT; i++)
+ {
+ globalMechanisms[i] = gc_global_mechanisms[i];
+ }
+
+ SOSDacLeave();
+ return hr;
+#else
+ return E_NOTIMPL;
+#endif //GC_CONFIG_DRIVEN
+}
+
+HRESULT
+ClrDataAccess::GetGCInterestingInfoStaticData(struct DacpGCInterestingInfoData *data)
+{
+#ifdef GC_CONFIG_DRIVEN
+ if (data == NULL)
+ return E_INVALIDARG;
+
+ SOSDacEnter();
+ memset(data, 0, sizeof(DacpGCInterestingInfoData));
+
+ if (!GCHeap::IsServerHeap())
+ {
+ for (int i = 0; i < NUM_GC_DATA_POINTS; i++)
+ data->interestingDataPoints[i] = WKS::interesting_data_per_heap[i];
+ for (int i = 0; i < MAX_COMPACT_REASONS_COUNT; i++)
+ data->compactReasons[i] = WKS::compact_reasons_per_heap[i];
+ for (int i = 0; i < MAX_EXPAND_MECHANISMS_COUNT; i++)
+ data->expandMechanisms[i] = WKS::expand_mechanisms_per_heap[i];
+ for (int i = 0; i < MAX_GC_MECHANISM_BITS_COUNT; i++)
+ data->bitMechanisms[i] = WKS::interesting_mechanism_bits_per_heap[i];
+ }
+ else
+ {
+ hr = E_FAIL;
+ }
+
+ SOSDacLeave();
+ return hr;
+#else
+ return E_NOTIMPL;
+#endif //GC_CONFIG_DRIVEN
+}
+
+HRESULT
+ClrDataAccess::GetGCInterestingInfoData(CLRDATA_ADDRESS interestingInfoAddr, struct DacpGCInterestingInfoData *data)
+{
+#ifdef GC_CONFIG_DRIVEN
+ if (interestingInfoAddr == 0 || data == NULL)
+ return E_INVALIDARG;
+
+ SOSDacEnter();
+ memset(data, 0, sizeof(DacpGCInterestingInfoData));
+
+ if (!GCHeap::IsServerHeap())
+ hr = E_FAIL; // doesn't make sense to call this on WKS mode
+
+#ifdef FEATURE_SVR_GC
+ else
+ hr = ServerGCInterestingInfoData(interestingInfoAddr, data);
+#else
+ _ASSERTE_MSG(false, "IsServerHeap returned true but FEATURE_SVR_GC not defined");
+ hr = E_NOTIMPL;
+#endif //FEATURE_SVR_GC
+
+ SOSDacLeave();
+ return hr;
+#else
+ return E_NOTIMPL;
+#endif //GC_CONFIG_DRIVEN
+}
+
+HRESULT
ClrDataAccess::GetHeapAnalyzeData(CLRDATA_ADDRESS addr, struct DacpGcHeapAnalyzeData *data)
{
if (addr == 0 || data == NULL)
@@ -3823,8 +3906,8 @@ HRESULT ClrDataAccess::GetClrWatsonBucketsWorker(Thread * pThread, GenericModeBl
OBJECTHANDLE ohThrowable = pThread->GetThrowableAsHandle();
if (ohThrowable != NULL)
{
- // Get the object from handle and check if the throwable is preallocated or not
- OBJECTREF oThrowable = ObjectFromHandle(ohThrowable);
+ // Get the object from handle and check if the throwable is preallocated or not
+ OBJECTREF oThrowable = ObjectFromHandle(ohThrowable);
if (oThrowable != NULL)
{
// Does the throwable have buckets?
diff --git a/src/debug/daccess/request_svr.cpp b/src/debug/daccess/request_svr.cpp
index 97b064898f..9dfd26046e 100644
--- a/src/debug/daccess/request_svr.cpp
+++ b/src/debug/daccess/request_svr.cpp
@@ -194,6 +194,31 @@ ClrDataAccess::ServerOomData(CLRDATA_ADDRESS addr, DacpOomData *oomData)
return S_OK;
}
+HRESULT
+ClrDataAccess::ServerGCInterestingInfoData(CLRDATA_ADDRESS addr, DacpGCInterestingInfoData *interestingInfoData)
+{
+#ifdef GC_CONFIG_DRIVEN
+ SVR::gc_heap *pHeap = PTR_SVR_gc_heap(TO_TADDR(addr));
+
+ size_t* dataPoints = (size_t*)&(pHeap->interesting_data_per_heap);
+ for (int i = 0; i < NUM_GC_DATA_POINTS; i++)
+ interestingInfoData->interestingDataPoints[i] = dataPoints[i];
+ size_t* mechanisms = (size_t*)&(pHeap->compact_reasons_per_heap);
+ for (int i = 0; i < MAX_COMPACT_REASONS_COUNT; i++)
+ interestingInfoData->compactReasons[i] = mechanisms[i];
+ mechanisms = (size_t*)&(pHeap->expand_mechanisms_per_heap);
+ for (int i = 0; i < MAX_EXPAND_MECHANISMS_COUNT; i++)
+ interestingInfoData->expandMechanisms[i] = mechanisms[i];
+ mechanisms = (size_t*)&(pHeap->interesting_mechanism_bits_per_heap);
+ for (int i = 0; i < MAX_GC_MECHANISM_BITS_COUNT; i++)
+ interestingInfoData->bitMechanisms[i] = mechanisms[i];
+
+ return S_OK;
+#else
+ return E_NOTIMPL;
+#endif //GC_CONFIG_DRIVEN
+}
+
HRESULT ClrDataAccess::ServerGCHeapAnalyzeData(CLRDATA_ADDRESS heapAddr, DacpGcHeapAnalyzeData *analyzeData)
{
if (!heapAddr)
diff --git a/src/debug/ee/controller.cpp b/src/debug/ee/controller.cpp
index 9988378c7a..4e1ab6557a 100644
--- a/src/debug/ee/controller.cpp
+++ b/src/debug/ee/controller.cpp
@@ -990,6 +990,7 @@ void DebuggerController::DeleteAllControllers()
while (pDebuggerController != NULL)
{
pNextDebuggerController = pDebuggerController->m_next;
+ pDebuggerController->DebuggerDetachClean();
pDebuggerController->Delete();
pDebuggerController = pNextDebuggerController;
}
@@ -1056,6 +1057,11 @@ void DebuggerController::Delete()
}
}
+void DebuggerController::DebuggerDetachClean()
+{
+ //do nothing here
+}
+
//static
void DebuggerController::AddRef(DebuggerControllerPatch *patch)
{
@@ -4463,6 +4469,42 @@ DebuggerPatchSkip::~DebuggerPatchSkip()
#endif
}
+void DebuggerPatchSkip::DebuggerDetachClean()
+{
+// Since SharedPatchBypassBuffer doesn't exist for ARM, we don't have to do anything here.
+#ifndef _TARGET_ARM_
+ // Fix for Bug 1176448
+ // When a debugger is detaching from the debuggee, we need to move the IP if it is pointing
+ // somewhere in PatchBypassBuffer. All managed threads are suspended during detach, so changing
+ // the context without notifications is safe.
+ // Notice:
+ // THIS FIX IS INCOMPLETE! It attempts to update the IP in the cases we can easily detect. However,
+ // if a thread is in pre-emptive mode, and its filter context has been propagated to a VEH
+ // context, then the filter context we get will be NULL and this fix will not work. Our belief is
+ // that this scenario is rare enough that it doesn't justify the cost and risk associated with a
+ // complete fix, in which we would have to either:
+ // 1. Change the reference counting for DebuggerController and then change the exception handling
+ // logic in the debuggee so that we can handle the debugger event after detach.
+ // 2. Create a "stack walking" implementation for native code and use it to get the current IP and
+ // set the IP to the right place.
+
+ Thread *thread = GetThread();
+ if (thread != NULL)
+ {
+ BYTE *patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
+ CONTEXT *context = thread->GetFilterContext();
+ if (patchBypass != NULL &&
+ context != NULL &&
+ (size_t)GetIP(context) >= (size_t)patchBypass &&
+ (size_t)GetIP(context) <= (size_t)(patchBypass + MAX_INSTRUCTION_LENGTH + 1))
+ {
+ SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address)));
+ }
+ }
+#endif
+}
+
+
//
// We have to have a whole seperate function for this because you
// can't use __try in a function that requires object unwinding...
diff --git a/src/debug/ee/controller.h b/src/debug/ee/controller.h
index 4782565dfa..5a0d5cf027 100644
--- a/src/debug/ee/controller.h
+++ b/src/debug/ee/controller.h
@@ -1105,6 +1105,8 @@ private:
static void ApplyTraceFlag(Thread *thread);
static void UnapplyTraceFlag(Thread *thread);
+ virtual void DebuggerDetachClean();
+
public:
static const BYTE *g_pMSCorEEStart, *g_pMSCorEEEnd;
@@ -1324,7 +1326,7 @@ public:
// still send.
//
// Returns true if send an event, false elsewise.
- virtual bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
+ virtual bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
AppDomain *m_pAppDomain;
@@ -1380,6 +1382,8 @@ class DebuggerPatchSkip : public DebuggerController
void DecodeInstruction(CORDB_ADDRESS_TYPE *code);
+ void DebuggerDetachClean();
+
CORDB_ADDRESS_TYPE *m_address;
int m_iOrigDisp; // the original displacement of a relative call or jump
InstructionAttribute m_instrAttrib; // info about the instruction being skipped over
diff --git a/src/debug/ee/functioninfo.cpp b/src/debug/ee/functioninfo.cpp
index af52ab5c9d..ece74aa8ed 100644
--- a/src/debug/ee/functioninfo.cpp
+++ b/src/debug/ee/functioninfo.cpp
@@ -806,7 +806,6 @@ DWORD DebuggerJitInfo::MapNativeOffsetToIL(SIZE_T nativeOffsetToMap,
{
// If the caller requested to skip prologs, we simply restart the walk
// with the offset set to the end of the prolog.
- m = GetSequenceMap();
nativeOffset = m->nativeEndOffset;
continue;
}
diff --git a/src/gc/gc.cpp b/src/gc/gc.cpp
index 9dc6e13f2b..6ed21696a6 100644
--- a/src/gc/gc.cpp
+++ b/src/gc/gc.cpp
@@ -71,6 +71,10 @@ inline BOOL ShouldTrackMovementForProfilerOrEtw()
// 256 GC threads and 256 GC heaps.
#define MAX_SUPPORTED_CPUS 256
+#ifdef GC_CONFIG_DRIVEN
+int compact_ratio = 0;
+#endif //GC_CONFIG_DRIVEN
+
#if defined (TRACE_GC) && !defined (DACCESS_COMPILE)
const char * const allocation_state_str[] = {
"start",
@@ -362,7 +366,6 @@ void gc_heap::add_to_history()
#endif //BACKGROUND_GC
#ifdef TRACE_GC
-
BOOL gc_log_on = TRUE;
HANDLE gc_log = INVALID_HANDLE_VALUE;
size_t gc_log_file_size = 0;
@@ -439,9 +442,55 @@ void GCLog (const char *fmt, ... )
va_end(args);
}
}
-
#endif //TRACE_GC
+#ifdef GC_CONFIG_DRIVEN
+BOOL gc_config_log_on = FALSE;
+HANDLE gc_config_log = INVALID_HANDLE_VALUE;
+
+// we keep this much in a buffer and only flush when the buffer is full
+#define gc_config_log_buffer_size (1*1024) // TEMP
+BYTE* gc_config_log_buffer = 0;
+size_t gc_config_log_buffer_offset = 0;
+
+// For config since we log so little we keep the whole history. Also it's only
+// ever logged by one thread so no need to synchronize.
+void log_va_msg_config(const char *fmt, va_list args)
+{
+ const int BUFFERSIZE = 256;
+ static char rgchBuffer[BUFFERSIZE];
+ char * pBuffer = &rgchBuffer[0];
+
+ pBuffer[0] = '\r';
+ pBuffer[1] = '\n';
+ int buffer_start = 2;
+ int msg_len = _vsnprintf (&pBuffer[buffer_start], BUFFERSIZE - buffer_start, fmt, args );
+ assert (msg_len != -1);
+ msg_len += buffer_start;
+
+ if ((gc_config_log_buffer_offset + msg_len) > gc_config_log_buffer_size)
+ {
+ DWORD written_to_log = 0;
+ WriteFile (gc_config_log, gc_config_log_buffer, (DWORD)gc_config_log_buffer_offset, &written_to_log, NULL);
+ FlushFileBuffers (gc_config_log);
+ gc_config_log_buffer_offset = 0;
+ }
+
+ memcpy (gc_config_log_buffer + gc_config_log_buffer_offset, pBuffer, msg_len);
+ gc_config_log_buffer_offset += msg_len;
+}
+
+void GCLogConfig (const char *fmt, ... )
+{
+ if (gc_config_log_on && (gc_config_log != INVALID_HANDLE_VALUE))
+ {
+ va_list args;
+ va_start( args, fmt );
+ log_va_msg_config (fmt, args);
+ }
+}
+#endif //GC_CONFIG_DRIVEN
+
#ifdef SYNCHRONIZATION_STATS
// Number of GCs have we done since we last logged.
@@ -2207,7 +2256,7 @@ HANDLE* gc_heap::g_gc_threads;
size_t* gc_heap::g_promoted;
#ifdef MH_SC_MARK
-BOOL* gc_heap::g_mark_stack_busy;
+BOOL* gc_heap::g_mark_stack_busy;
#endif //MH_SC_MARK
@@ -2241,6 +2290,10 @@ size_t gc_heap::gc_last_ephemeral_decommit_time = 0;
size_t gc_heap::gc_gen0_desired_high;
+#ifdef SHORT_PLUGS
+float gc_heap::short_plugs_pad_ratio = 0;
+#endif //SHORT_PLUGS
+
#if defined(_WIN64)
#define MAX_ALLOWED_MEM_LOAD 85
@@ -2452,9 +2505,7 @@ size_t gc_heap::c_mark_list_length = 0;
size_t gc_heap::c_mark_list_index = 0;
-gc_history_per_heap gc_heap::saved_bgc_data_per_heap;
-
-BOOL gc_heap::bgc_data_saved_p = FALSE;
+gc_history_per_heap gc_heap::bgc_data_per_heap;
BOOL gc_heap::bgc_thread_running;
@@ -2527,6 +2578,11 @@ size_t gc_heap::current_obj_size = 0;
#endif //HEAP_ANALYZE
+#ifdef GC_CONFIG_DRIVEN
+size_t gc_heap::interesting_data_per_gc[max_idp_count];
+//size_t gc_heap::interesting_data_per_heap[max_idp_count];
+//size_t gc_heap::interesting_mechanisms_per_heap[max_im_count];
+#endif //GC_CONFIG_DRIVEN
#endif //MULTIPLE_HEAPS
no_gc_region_info gc_heap::current_no_gc_region_info;
@@ -2538,6 +2594,12 @@ heap_segment* gc_heap::segment_standby_list;
size_t gc_heap::last_gc_index = 0;
size_t gc_heap::min_segment_size = 0;
+#ifdef GC_CONFIG_DRIVEN
+size_t gc_heap::time_init = 0;
+size_t gc_heap::time_since_init = 0;
+size_t gc_heap::compact_or_sweep_gcs[2];
+#endif //GC_CONFIG_DRIVEN
+
#ifdef FEATURE_LOH_COMPACTION
BOOL gc_heap::loh_compaction_always_p = FALSE;
gc_loh_compaction_mode gc_heap::loh_compaction_mode = loh_compaction_default;
@@ -2573,6 +2635,12 @@ BOOL gc_heap::heap_analyze_enabled = FALSE;
extern "C" {
#endif //!DACCESS_COMPILE
GARY_IMPL(generation, generation_table,NUMBERGENERATIONS+1);
+#ifdef GC_CONFIG_DRIVEN
+GARY_IMPL(size_t, interesting_data_per_heap, max_idp_count);
+GARY_IMPL(size_t, compact_reasons_per_heap, max_compact_reasons_count);
+GARY_IMPL(size_t, expand_mechanisms_per_heap, max_expand_mechanisms_count);
+GARY_IMPL(size_t, interesting_mechanism_bits_per_heap, max_gc_mechanism_bits_count);
+#endif //GC_CONFIG_DRIVEN
#ifndef DACCESS_COMPILE
}
#endif //!DACCESS_COMPILE
@@ -2669,6 +2737,21 @@ void gc_generation_data::print (int heap_num, int gen_num)
#endif //SIMPLE_DPRINTF && DT_LOG
}
+void gc_history_per_heap::set_mechanism (gc_mechanism_per_heap mechanism_per_heap, DWORD value)
+{
+ DWORD* mechanism = &mechanisms[mechanism_per_heap];
+ *mechanism = 0;
+ *mechanism |= mechanism_mask;
+ *mechanism |= (1 << value);
+
+#ifdef DT_LOG
+ gc_mechanism_descr* descr = &gc_mechanisms_descr[mechanism_per_heap];
+ dprintf (DT_LOG_0, ("setting %s: %s",
+ descr->name,
+ (descr->descr)[value]));
+#endif //DT_LOG
+}
+
void gc_history_per_heap::print()
{
#if defined(SIMPLE_DPRINTF) && defined(DT_LOG)
@@ -2711,14 +2794,15 @@ void gc_history_global::print()
#ifdef DT_LOG
char str_settings[64];
memset (str_settings, '|', sizeof (char) * 64);
- str_settings[max_global_mechanism*2] = 0;
+ str_settings[max_global_mechanisms_count*2] = 0;
- for (int i = 0; i < max_global_mechanism; i++)
+ for (int i = 0; i < max_global_mechanisms_count; i++)
{
str_settings[i * 2] = (get_mechanism_p ((gc_global_mechanism_p)i) ? 'Y' : 'N');
}
dprintf (DT_LOG_0, ("[hp]|c|p|o|d|b|e|"));
+
dprintf (DT_LOG_0, ("%4d|%s", num_heaps, str_settings));
dprintf (DT_LOG_0, ("Condemned gen%d(reason: %s; mode: %s), youngest budget %Id(%d), memload %d",
condemned_generation,
@@ -2743,7 +2827,7 @@ void gc_heap::fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per
maxgen_size_info->running_free_list_efficiency,
current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons0(),
current_gc_data_per_heap->gen_to_condemn_reasons.get_reasons1(),
- current_gc_data_per_heap->mechanisms[gc_compact],
+ current_gc_data_per_heap->mechanisms[gc_heap_compact],
current_gc_data_per_heap->mechanisms[gc_heap_expand],
current_gc_data_per_heap->heap_index,
(BYTE*)(current_gc_data_per_heap->extra_gen0_committed),
@@ -4245,8 +4329,8 @@ gc_heap::compute_new_ephemeral_size()
#endif //RESPECT_LARGE_ALIGNMENT
#ifdef SHORT_PLUGS
- float pad_ratio = (float)24 / (float)DESIRED_PLUG_LENGTH;
- total_ephemeral_size += (size_t)((float)total_ephemeral_size * pad_ratio) + Align (min_obj_size);
+ total_ephemeral_size = Align ((size_t)((float)total_ephemeral_size * short_plugs_pad_ratio) + 1);
+ total_ephemeral_size += Align (min_obj_size);
#endif //SHORT_PLUGS
dprintf (3, ("total ephemeral size is %Ix, padding %Ix(%Ix)",
@@ -4299,7 +4383,7 @@ gc_heap::soh_get_segment_to_expand()
if (can_expand_into_p (seg, size/3, total_ephemeral_size, gen_alloc))
{
- gc_data_per_heap.set_mechanism (gc_heap_expand,
+ get_gc_data_per_heap()->set_mechanism (gc_heap_expand,
(use_bestfit ? expand_reuse_bestfit : expand_reuse_normal));
if (settings.condemned_generation == max_generation)
{
@@ -4331,6 +4415,7 @@ gc_heap::soh_get_segment_to_expand()
if (settings.pause_mode != pause_sustained_low_latency)
{
dprintf (GTC_LOG, ("max_gen-1: SustainedLowLatency is set, acquire a new seg"));
+ get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_next_full_gc);
return 0;
}
}
@@ -4357,7 +4442,7 @@ gc_heap::soh_get_segment_to_expand()
GetClrInstanceId());
}
- gc_data_per_heap.set_mechanism (gc_heap_expand, (result ? expand_new_seg : expand_no_memory));
+ get_gc_data_per_heap()->set_mechanism (gc_heap_expand, (result ? expand_new_seg : expand_no_memory));
if (result == 0)
{
@@ -4981,8 +5066,8 @@ void set_thread_group_affinity_for_heap(HANDLE gc_thread, int heap_number)
CPUGroupInfo::GetGroupForProcessor((WORD)heap_number, &gn, &gpn);
ga.Group = gn;
- ga.Reserved[0] = 0; // reserve must be filled with zero
- ga.Reserved[1] = 0; // otherwise call may fail
+ ga.Reserved[0] = 0; // reserve must be filled with zero
+ ga.Reserved[1] = 0; // otherwise call may fail
ga.Reserved[2] = 0;
int bit_number = 0;
@@ -5196,7 +5281,7 @@ DWORD gc_heap::gc_thread_function ()
#endif //MULTIPLE_HEAPS
void* virtual_alloc_commit_for_heap(void* addr, size_t size, DWORD type,
- DWORD prot, int h_number)
+ DWORD prot, int h_number)
{
#if defined(MULTIPLE_HEAPS) && !defined(FEATURE_REDHAWK) && !defined(FEATURE_PAL)
// Currently there is no way for us to specific the numa node to allocate on via hosting interfaces to
@@ -5487,6 +5572,7 @@ void gc_mechanisms::init_mechanisms()
heap_expansion = FALSE;
concurrent = FALSE;
demotion = FALSE;
+ elevation_reduced = FALSE;
found_finalizers = FALSE;
#ifdef BACKGROUND_GC
background_p = recursive_gc_sync::background_running_p() != FALSE;
@@ -5556,6 +5642,9 @@ void gc_mechanisms::record (gc_history_global* history)
if (card_bundles)
history->set_mechanism_p (global_card_bundles);
+
+ if (elevation_reduced)
+ history->set_mechanism_p (global_elevation);
}
/**********************************
@@ -7082,7 +7171,7 @@ fail:
void gc_heap::copy_brick_card_range (BYTE* la, DWORD* old_card_table,
short* old_brick_table,
heap_segment* seg,
- BYTE* start, BYTE* end, BOOL heap_expand)
+ BYTE* start, BYTE* end)
{
ptrdiff_t brick_offset = brick_of (start) - brick_of (la);
@@ -7163,7 +7252,6 @@ void gc_heap::copy_brick_card_range (BYTE* la, DWORD* old_card_table,
}
ct = card_table_next (ct);
}
-
}
//initialize all of the arrays managed by the card table for a page aligned range when an existing ro segment becomes in range
@@ -7193,7 +7281,7 @@ void gc_heap::init_brick_card_range (heap_segment* seg)
heap_segment_allocated (seg));
}
-void gc_heap::copy_brick_card_table(BOOL heap_expand)
+void gc_heap::copy_brick_card_table()
{
BYTE* la = lowest_address;
BYTE* ha = highest_address;
@@ -7282,8 +7370,7 @@ void gc_heap::copy_brick_card_table(BOOL heap_expand)
old_brick_table,
seg,
align_lower_page (heap_segment_mem (seg)),
- end,
- heap_expand);
+ end);
}
seg = heap_segment_next (seg);
}
@@ -7307,8 +7394,7 @@ void gc_heap::copy_brick_card_table(BOOL heap_expand)
0,
seg,
align_lower_page (heap_segment_mem (seg)),
- end,
- heap_expand);
+ end);
}
seg = heap_segment_next (seg);
}
@@ -7556,18 +7642,18 @@ private:
static void introsort_loop (BYTE** lo, BYTE** hi, int depth_limit)
{
- while (hi-lo >= size_threshold)
- {
- if (depth_limit == 0)
- {
- heapsort (lo, hi);
+ while (hi-lo >= size_threshold)
+ {
+ if (depth_limit == 0)
+ {
+ heapsort (lo, hi);
return;
- }
+ }
BYTE** p=median_partition (lo, hi);
- depth_limit=depth_limit-1;
+ depth_limit=depth_limit-1;
introsort_loop (p, hi, depth_limit);
hi=p-1;
- }
+ }
}
static BYTE** median_partition (BYTE** low, BYTE** high)
@@ -9385,19 +9471,11 @@ void gc_heap::adjust_ephemeral_limits ()
StompWriteBarrierEphemeral();
}
-HRESULT gc_heap::initialize_gc (size_t segment_size,
- size_t heap_size
-#ifdef MULTIPLE_HEAPS
- ,unsigned number_of_heaps
-#endif //MULTIPLE_HEAPS
-)
+#if defined(TRACE_GC) || defined(GC_CONFIG_DRIVEN)
+HANDLE CreateLogFile(const CLRConfig::ConfigStringInfo & info, BOOL is_config)
{
-#ifdef TRACE_GC
- int log_last_gcs = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCLogEnabled);
- if (log_last_gcs)
- {
- LPWSTR temp_logfile_name = NULL;
- CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCLogFile, &temp_logfile_name);
+ LPWSTR temp_logfile_name = NULL;
+ CLRConfig::GetConfigValue(info, &temp_logfile_name);
#ifdef FEATURE_REDHAWK
gc_log = PalCreateFileW(
@@ -9409,21 +9487,21 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
FILE_ATTRIBUTE_NORMAL,
NULL);
#else // FEATURE_REDHAWK
- char logfile_name[MAX_LONGPATH+1];
- if (temp_logfile_name != 0)
- {
- int ret;
- ret = WszWideCharToMultiByte(CP_ACP, 0, temp_logfile_name, -1, logfile_name, sizeof(logfile_name)-1, NULL, NULL);
- _ASSERTE(ret != 0);
- delete temp_logfile_name;
- }
+ char logfile_name[MAX_LONGPATH+1];
+ if (temp_logfile_name != 0)
+ {
+ int ret;
+ ret = WszWideCharToMultiByte(CP_ACP, 0, temp_logfile_name, -1, logfile_name, sizeof(logfile_name)-1, NULL, NULL);
+ _ASSERTE(ret != 0);
+ delete temp_logfile_name;
+ }
- char szPid[20];
- sprintf_s(szPid, _countof(szPid), ".%d", GetCurrentProcessId());
- strcat_s(logfile_name, _countof(logfile_name), szPid);
- strcat_s(logfile_name, _countof(logfile_name), ".log");
+ char szPid[20];
+ sprintf_s(szPid, _countof(szPid), ".%d", GetCurrentProcessId());
+ strcat_s(logfile_name, _countof(logfile_name), szPid);
+ strcat_s(logfile_name, _countof(logfile_name), (is_config ? ".config.log" : ".log"));
- gc_log = CreateFileA(
+ HANDLE hFile = CreateFileA(
logfile_name,
GENERIC_WRITE,
FILE_SHARE_READ,
@@ -9431,12 +9509,26 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
CREATE_ALWAYS,
FILE_ATTRIBUTE_NORMAL,
NULL);
-#endif // FEATURE_REDHAWK
+#endif //FEATURE_REDHAWK
+ return hFile;
+}
+#endif //TRACE_GC || GC_CONFIG_DRIVEN
+
+HRESULT gc_heap::initialize_gc (size_t segment_size,
+ size_t heap_size
+#ifdef MULTIPLE_HEAPS
+ ,unsigned number_of_heaps
+#endif //MULTIPLE_HEAPS
+)
+{
+#ifdef TRACE_GC
+ int log_last_gcs = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCLogEnabled);
+ if (log_last_gcs)
+ {
+ gc_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCLogFile, FALSE);
if (gc_log == INVALID_HANDLE_VALUE)
- {
return E_FAIL;
- }
// GCLogFileSize in MBs.
gc_log_file_size = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCLogFileSize);
@@ -9451,16 +9543,58 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
gc_log_buffer = new (nothrow) BYTE [gc_log_buffer_size];
if (!gc_log_buffer)
{
+ CloseHandle(gc_log);
return E_FAIL;
}
+
memset (gc_log_buffer, '*', gc_log_buffer_size);
max_gc_buffers = gc_log_file_size * 1024 * 1024 / gc_log_buffer_size;
- //max_gc_buffers = gc_log_file_size * 1024 * 5/ gc_log_buffer_size;
-
}
#endif // TRACE_GC
+#ifdef GC_CONFIG_DRIVEN
+ gc_config_log_on = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCConfigLogEnabled);
+ if (gc_config_log_on)
+ {
+ gc_config_log = CreateLogFile(CLRConfig::UNSUPPORTED_GCConfigLogFile, TRUE);
+
+ if (gc_config_log == INVALID_HANDLE_VALUE)
+ return E_FAIL;
+
+ gc_config_log_buffer = new (nothrow) BYTE [gc_config_log_buffer_size];
+ if (!gc_config_log_buffer)
+ {
+ CloseHandle(gc_config_log);
+ return E_FAIL;
+ }
+
+ compact_ratio = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCCompactRatio);
+
+ // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
+ cprintf (("%2s | %6s | %1s | %1s | %2s | %2s | %2s | %2s | %2s || %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s | %5s |",
+ "h#", // heap index
+ "GC", // GC index
+ "g", // generation
+ "C", // compaction (empty means sweeping), 'M' means it was mandatory, 'W' means it was not
+ "EX", // heap expansion
+ "NF", // normal fit
+ "BF", // best fit (if it indicates neither NF nor BF it means it had to acquire a new seg.
+ "ML", // mark list
+ "DM", // demotion
+ "PreS", // short object before pinned plug
+ "PostS", // short object after pinned plug
+ "Merge", // merged pinned plugs
+ "Conv", // converted to pinned plug
+ "Pre", // plug before pinned plug but not after
+ "Post", // plug after pinned plug but not before
+ "PrPo", // plug both before and after pinned plug
+ "PreP", // pre short object padded
+ "PostP" // post short object padded
+ ));
+ }
+#endif //GC_CONFIG_DRIVEN
+
#ifdef GC_STATS
GCStatistics::logFileName = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_GCMixLog);
if (GCStatistics::logFileName != NULL)
@@ -9519,10 +9653,8 @@ HRESULT gc_heap::initialize_gc (size_t segment_size,
}
#endif //CARD_BUNDLE
- //Init the gc_mechanisms
settings.first_init();
- //g_highest_address = (BYTE*)0x7ffe0000;
g_card_table = make_card_table (g_lowest_address, g_highest_address);
if (!g_card_table)
@@ -9685,6 +9817,16 @@ gc_heap::init_semi_shared()
memset (&current_no_gc_region_info, 0, sizeof (current_no_gc_region_info));
+#ifdef GC_CONFIG_DRIVEN
+ compact_or_sweep_gcs[0] = 0;
+ compact_or_sweep_gcs[1] = 0;
+#endif //GC_CONFIG_DRIVEN
+
+#ifdef SHORT_PLUGS
+ size_t max_num_objects_in_plug = (size_t)DESIRED_PLUG_LENGTH / Align (min_obj_size);
+ short_plugs_pad_ratio = (float)(max_num_objects_in_plug + 1) / (float)max_num_objects_in_plug;
+#endif //SHORT_PLUGS
+
ret = 1;
cleanup:
@@ -10238,6 +10380,14 @@ gc_heap::init_gc_heap (int h_number)
bgc_overflow_count = 0;
end_loh_size = dd_min_gc_size (dynamic_data_of (max_generation + 1));
#endif //BACKGROUND_GC
+
+#ifdef GC_CONFIG_DRIVEN
+ memset (interesting_data_per_heap, 0, sizeof (interesting_data_per_heap));
+ memset(compact_reasons_per_heap, 0, sizeof (compact_reasons_per_heap));
+ memset(expand_mechanisms_per_heap, 0, sizeof (expand_mechanisms_per_heap));
+ memset(interesting_mechanism_bits_per_heap, 0, sizeof (interesting_mechanism_bits_per_heap));
+#endif //GC_CONFIG_DRIVEN
+
return 1;
}
@@ -10787,6 +10937,7 @@ void allocator::thread_item_front (BYTE* item, size_t size)
alloc_list* al = &alloc_list_of (a_l_number);
free_list_slot (item) = al->alloc_list_head();
free_list_undo (item) = UNDO_EMPTY;
+
if (al->alloc_list_tail() == 0)
{
al->alloc_list_tail() = al->alloc_list_head();
@@ -10803,6 +10954,17 @@ void allocator::copy_to_alloc_list (alloc_list* toalist)
for (unsigned int i = 0; i < num_buckets; i++)
{
toalist [i] = alloc_list_of (i);
+#ifdef FL_VERIFICATION
+ BYTE* free_item = alloc_list_head_of (i);
+ size_t count = 0;
+ while (free_item)
+ {
+ count++;
+ free_item = free_list_slot (free_item);
+ }
+
+ toalist[i].item_count = count;
+#endif //FL_VERIFICATION
}
}
@@ -10811,14 +10973,16 @@ void allocator::copy_from_alloc_list (alloc_list* fromalist)
BOOL repair_list = !discard_if_no_fit_p ();
for (unsigned int i = 0; i < num_buckets; i++)
{
+ size_t count = alloc_list_damage_count_of (i);
alloc_list_of (i) = fromalist [i];
+ assert (alloc_list_damage_count_of (i) == 0);
+
if (repair_list)
{
//repair the the list
//new items may have been added during the plan phase
//items may have been unlinked.
BYTE* free_item = alloc_list_head_of (i);
- size_t count = alloc_list_damage_count_of (i);
while (free_item && count)
{
assert (((CObjectHeader*)free_item)->IsFree());
@@ -10832,7 +10996,17 @@ void allocator::copy_from_alloc_list (alloc_list* fromalist)
free_item = free_list_slot (free_item);
}
- alloc_list_damage_count_of (i) = 0;
+#ifdef FL_VERIFICATION
+ free_item = alloc_list_head_of (i);
+ size_t item_count = 0;
+ while (free_item)
+ {
+ item_count++;
+ free_item = free_list_slot (free_item);
+ }
+
+ assert (item_count == alloc_list_of (i).item_count);
+#endif //FL_VERIFICATION
}
#ifdef DEBUG
BYTE* tail_item = alloc_list_tail_of (i);
@@ -13211,13 +13385,18 @@ BYTE* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
allocator* gen_allocator = generation_allocator (gen);
BOOL discard_p = gen_allocator->discard_if_no_fit_p ();
int pad_in_front = (old_loc != 0)? USE_PADDING_FRONT : 0;
+
+ size_t real_size = size + Align (min_obj_size);
+ if (pad_in_front)
+ real_size += Align (min_obj_size);
+
if (! (size_fit_p (size REQD_ALIGN_AND_OFFSET_ARG, generation_allocation_pointer (gen),
generation_allocation_limit (gen), old_loc, USE_PADDING_TAIL | pad_in_front)))
{
size_t sz_list = gen_allocator->first_bucket_size();
for (unsigned int a_l_idx = 0; a_l_idx < gen_allocator->number_of_buckets(); a_l_idx++)
{
- if ((size < (sz_list / 2)) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
+ if ((real_size < (sz_list / 2)) || (a_l_idx == (gen_allocator->number_of_buckets()-1)))
{
BYTE* free_list = gen_allocator->alloc_list_head_of (a_l_idx);
BYTE* prev_free_item = 0;
@@ -13240,7 +13419,8 @@ BYTE* gc_heap::allocate_in_older_generation (generation* gen, size_t size,
adjust_limit (free_list, free_list_size, gen, from_gen_number+1);
goto finished;
}
- else if (discard_p)
+ // We do first fit on bucket 0 because we are not guaranteed to find a fit there.
+ else if (discard_p || (a_l_idx == 0))
{
dprintf (3, ("couldn't use this free area, discarding"));
generation_free_obj_space (gen) += free_list_size;
@@ -13890,6 +14070,8 @@ retry:
clear_plug_padded (old_loc);
pad = 0;
*convert_to_pinned_p = TRUE;
+ record_interesting_data_point (idp_converted_pin);
+
return 0;
}
}
@@ -13927,7 +14109,6 @@ inline int power (int x, int y)
return z;
}
-inline
int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
int n_initial,
BOOL* blocking_collection_p
@@ -13966,7 +14147,7 @@ int gc_heap::joined_generation_to_condemn (BOOL should_evaluate_elevation,
else
{
n = max_generation - 1;
- gc_data_global.set_mechanism_p (global_elevation);
+ settings.elevation_reduced = TRUE;
}
}
else
@@ -14426,7 +14607,7 @@ int gc_heap::generation_to_condemn (int n_initial,
if (should_expand_in_full_gc)
{
- dprintf (GTC_LOG, ("h%d: expand_in_full", heap_number));
+ dprintf (GTC_LOG, ("h%d: expand_in_full - BLOCK", heap_number));
*blocking_collection_p = TRUE;
if (!check_only_p)
{
@@ -14614,7 +14795,7 @@ exit:
}
local_condemn_reasons->set_gen (gen_final_per_heap, n);
- gc_data_per_heap.gen_to_condemn_reasons.init (local_condemn_reasons);
+ get_gc_data_per_heap()->gen_to_condemn_reasons.init (local_condemn_reasons);
#ifdef DT_LOG
local_condemn_reasons->print (heap_number);
@@ -14880,6 +15061,12 @@ void gc_heap::gc1()
FATAL_GC_ERROR();
size_t end_gc_time = (size_t) (ts.QuadPart/(qpf.QuadPart/1000));
+
+#ifdef GC_CONFIG_DRIVEN
+ if (heap_number == 0)
+ time_since_init = end_gc_time - time_init;
+#endif //GC_CONFIG_DRIVEN
+
// printf ("generation: %d, elapsed time: %Id\n", n, end_gc_time - dd_time_clock (dynamic_data_of (0)));
//adjust the allocation size from the pinned quantities.
@@ -14935,13 +15122,13 @@ void gc_heap::gc1()
int gen_num_for_data = ((n < (max_generation - 1)) ? (n + 1) : (max_generation + 1));
for (int gen_number = (n + 1); gen_number <= gen_num_for_data; gen_number++)
{
- gc_data_per_heap.gen_data[gen_number].size_after = generation_size (gen_number);
- gc_data_per_heap.gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
- gc_data_per_heap.gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
+ get_gc_data_per_heap()->gen_data[gen_number].size_after = generation_size (gen_number);
+ get_gc_data_per_heap()->gen_data[gen_number].free_list_space_after = generation_free_list_space (generation_of (gen_number));
+ get_gc_data_per_heap()->gen_data[gen_number].free_obj_space_after = generation_free_obj_space (generation_of (gen_number));
}
}
- gc_data_per_heap.maxgen_size_info.running_free_list_efficiency = (DWORD)(generation_allocator_efficiency (generation_of (max_generation)) * 100);
+ get_gc_data_per_heap()->maxgen_size_info.running_free_list_efficiency = (DWORD)(generation_allocator_efficiency (generation_of (max_generation)) * 100);
free_list_info (max_generation, "after computing new dynamic data");
@@ -15950,6 +16137,18 @@ void gc_heap::allocate_for_no_gc_after_gc()
#endif //MULTIPLE_HEAPS
}
+void gc_heap::init_records()
+{
+ memset (&gc_data_per_heap, 0, sizeof (gc_data_per_heap));
+ gc_data_per_heap.heap_index = heap_number;
+ if (heap_number == 0)
+ memset (&gc_data_global, 0, sizeof (gc_data_global));
+
+#ifdef GC_CONFIG_DRIVEN
+ memset (interesting_data_per_gc, 0, sizeof (interesting_data_per_gc));
+#endif //GC_CONFIG_DRIVEN
+}
+
int gc_heap::garbage_collect (int n)
{
//TODO BACKGROUND_GC remove these when ready
@@ -15995,18 +16194,9 @@ int gc_heap::garbage_collect (int n)
goto done;
}
-#ifdef BACKGROUND_GC
- if (recursive_gc_sync::background_running_p())
- {
- save_bgc_data_per_heap();
- }
-#endif //BACKGROUND_GC
-
- memset (&gc_data_per_heap, 0, sizeof (gc_data_per_heap));
- gc_data_per_heap.heap_index = heap_number;
- if (heap_number == 0)
- memset (&gc_data_global, 0, sizeof (gc_data_global));
+ init_records();
memset (&fgm_result, 0, sizeof (fgm_result));
+
settings.reason = gc_trigger_reason;
verify_pinned_queue_p = FALSE;
@@ -16047,7 +16237,7 @@ int gc_heap::garbage_collect (int n)
//copy the card and brick tables
if (g_card_table != g_heaps[i]->card_table)
{
- g_heaps[i]->copy_brick_card_table (FALSE);
+ g_heaps[i]->copy_brick_card_table();
}
g_heaps[i]->rearrange_large_heap_segments();
@@ -16070,7 +16260,7 @@ int gc_heap::garbage_collect (int n)
#endif //BACKGROUND_GC
// check for card table growth
if (g_card_table != card_table)
- copy_brick_card_table (FALSE);
+ copy_brick_card_table();
#endif //MULTIPLE_HEAPS
@@ -16236,8 +16426,8 @@ int gc_heap::garbage_collect (int n)
{
// We need to save the settings because we'll need to restore it after each FGC.
assert (settings.condemned_generation == max_generation);
+ settings.compaction = FALSE;
saved_bgc_settings = settings;
- bgc_data_saved_p = FALSE;
#ifdef MULTIPLE_HEAPS
if (heap_number == 0)
@@ -16346,6 +16536,7 @@ int gc_heap::garbage_collect (int n)
}
else
{
+ settings.compaction = TRUE;
c_write (settings.concurrent, FALSE);
}
@@ -16356,10 +16547,15 @@ int gc_heap::garbage_collect (int n)
if (do_concurrent_p)
{
+ // At this point we are sure we'll be starting a BGC, so save its per heap data here.
+ // global data is only calculated at the end of the GC so we don't need to worry about
+ // FGCs overwriting it.
+ memset (&bgc_data_per_heap, 0, sizeof (bgc_data_per_heap));
+ memcpy (&bgc_data_per_heap, &gc_data_per_heap, sizeof(gc_data_per_heap));
+
if (do_ephemeral_gc_p)
{
dprintf (2, ("GC threads running, doing gen%d GC", settings.condemned_generation));
- save_bgc_data_per_heap();
gen_to_condemn_reasons.init();
gen_to_condemn_reasons.set_condition (gen_before_bgc);
@@ -16730,7 +16926,11 @@ BYTE* gc_heap::find_object (BYTE* o, BYTE* low)
#endif //INTERIOR_POINTERS
#ifdef MARK_LIST
+#ifdef GC_CONFIG_DRIVEN
+#define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;} else {mark_list_index++;} if (slow > o) slow = o; if (shigh < o) shigh = o;}
+#else
#define m_boundary(o) {if (mark_list_index <= mark_list_end) {*mark_list_index = o;mark_list_index++;}if (slow > o) slow = o; if (shigh < o) shigh = o;}
+#endif //GC_CONFIG_DRIVEN
#else //MARK_LIST
#define m_boundary(o) {if (slow > o) slow = o; if (shigh < o) shigh = o;}
#endif //MARK_LIST
@@ -16971,8 +17171,13 @@ void gc_heap::enque_pinned_plug (BYTE* plug,
size_t last_obj_size = plug - last_object_in_last_plug;
if (last_obj_size < min_pre_pin_obj_size)
{
+ record_interesting_data_point (idp_pre_short);
+#ifdef SHORT_PLUGS
+ if (is_padded)
+ record_interesting_data_point (idp_pre_short_padded);
+#endif //SHORT_PLUGS
dprintf (3, ("encountered a short object %Ix right before pinned plug %Ix!",
- last_object_in_last_plug, plug));
+ last_object_in_last_plug, plug));
// Need to set the short bit regardless of having refs or not because we need to
// indicate that this object is not walkable.
m.set_pre_short();
@@ -17034,6 +17239,11 @@ void gc_heap::save_post_plug_info (BYTE* last_pinned_plug, BYTE* last_object_in_
if (last_obj_size < min_pre_pin_obj_size)
{
dprintf (3, ("PP %Ix last obj %Ix is too short", last_pinned_plug, last_object_in_last_plug));
+ record_interesting_data_point (idp_post_short);
+#ifdef SHORT_PLUGS
+ if (is_padded)
+ record_interesting_data_point (idp_post_short_padded);
+#endif //SHORT_PLUGS
m.set_post_short();
verify_pinned_queue_p = TRUE;
@@ -20832,6 +21042,13 @@ void gc_heap::store_plug_gap_info (BYTE* plug_start,
}
}
+void gc_heap::record_interesting_data_point (interesting_data_point idp)
+{
+#ifdef GC_CONFIG_DRIVEN
+ (interesting_data_per_gc[idp])++;
+#endif //GC_CONFIG_DRIVEN
+}
+
#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
@@ -20864,8 +21081,13 @@ void gc_heap::plan_phase (int condemned_gen_number)
#ifdef MARK_LIST
BOOL use_mark_list = FALSE;
BYTE** mark_list_next = &mark_list[0];
+#ifdef GC_CONFIG_DRIVEN
+ dprintf (3, ("total number of marked objects: %Id (%Id)",
+ (mark_list_index - &mark_list[0]), ((mark_list_end - &mark_list[0]))));
+#else
dprintf (3, ("mark_list length: %Id",
- mark_list_index - &mark_list[0]));
+ (mark_list_index - &mark_list[0])));
+#endif //GC_CONFIG_DRIVEN
if ((condemned_gen_number < max_generation) &&
(mark_list_index <= mark_list_end)
@@ -20880,6 +21102,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
//verify_qsort_array (&mark_list[0], mark_list_index-1);
#endif //!MULTIPLE_HEAPS
use_mark_list = TRUE;
+ get_gc_data_per_heap()->set_mechanism_bit (gc_mark_list_bit);
}
else
{
@@ -21419,19 +21642,12 @@ void gc_heap::plan_phase (int condemned_gen_number)
else
{
#ifdef SIMPLE_DPRINTF
- dprintf (3, ("(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix",
+ dprintf (3, ("(%Ix)[%Ix->%Ix, NA: [%Ix(%Id), %Ix[: %Ix(%d)",
(size_t)(node_gap_size (plug_start)),
plug_start, plug_end, (size_t)new_address, (size_t)(plug_start - new_address),
- (size_t)new_address + ps, ps));
+ (size_t)new_address + ps, ps,
+ (is_plug_padded (plug_start) ? 1 : 0)));
#endif //SIMPLE_DPRINTF
-
-#ifdef SHORT_PLUGS
- if (is_plug_padded (plug_start))
- {
- dprintf (3, ("%Ix was padded", plug_start));
- dd_padding_size (dd_active_old) += Align (min_obj_size);
- }
-#endif //SHORT_PLUGS
}
}
}
@@ -21753,7 +21969,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
free_list_allocated, rejected_free_space, end_seg_allocated,
condemned_allocated, generation_condemned_allocated (generation_of (settings.condemned_generation))));
- maxgen_size_increase* maxgen_size_info = &(gc_data_per_heap.maxgen_size_info);
+ maxgen_size_increase* maxgen_size_info = &(get_gc_data_per_heap()->maxgen_size_info);
maxgen_size_info->free_list_allocated = free_list_allocated;
maxgen_size_info->free_list_rejected = rejected_free_space;
maxgen_size_info->end_seg_allocated = end_seg_allocated;
@@ -21816,10 +22032,13 @@ void gc_heap::plan_phase (int condemned_gen_number)
settings.entry_memory_load));
should_compact = TRUE;
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact,
+ ((settings.gen0_reduction_count > 0) ? compact_fragmented_gen0 : compact_high_mem_load));
+
if ((condemned_gen_number >= (max_generation - 1)) &&
dt_low_ephemeral_space_p (tuning_deciding_expansion))
{
- dprintf(2,("Not enough space for all ephemeral generations with compaction"));
+ dprintf (2, ("Not enough space for all ephemeral generations with compaction"));
should_expand = TRUE;
}
}
@@ -21843,7 +22062,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
if (plan_loh())
{
should_compact = TRUE;
- gc_data_per_heap.set_mechanism (gc_compact, compact_loh_forced);
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_loh_forced);
loh_compacted_p = TRUE;
}
}
@@ -21903,6 +22122,10 @@ void gc_heap::plan_phase (int condemned_gen_number)
settings.demotion = FALSE;
int pol_max = policy_sweep;
+#ifdef GC_CONFIG_DRIVEN
+ BOOL is_compaction_mandatory = FALSE;
+#endif //GC_CONFIG_DRIVEN
+
int i;
for (i = 0; i < n_heaps; i++)
{
@@ -21910,8 +22133,40 @@ void gc_heap::plan_phase (int condemned_gen_number)
pol_max = policy_compact;
// set the demotion flag is any of the heap has demotion
if (g_heaps[i]->demotion_high >= g_heaps[i]->demotion_low)
+ {
+ (g_heaps[i]->get_gc_data_per_heap())->set_mechanism_bit (gc_demotion_bit);
settings.demotion = TRUE;
+ }
+
+#ifdef GC_CONFIG_DRIVEN
+ if (!is_compaction_mandatory)
+ {
+ int compact_reason = (g_heaps[i]->get_gc_data_per_heap())->get_mechanism (gc_heap_compact);
+ if (compact_reason >= 0)
+ {
+ if (gc_heap_compact_reason_mandatory_p[compact_reason])
+ is_compaction_mandatory = TRUE;
+ }
+ }
+#endif //GC_CONFIG_DRIVEN
+ }
+
+#ifdef GC_CONFIG_DRIVEN
+ if (!is_compaction_mandatory)
+ {
+ // If compaction is not mandatory we can feel free to change it to a sweeping GC.
+ // Note that we may want to change this to only checking every so often instead of every single GC.
+ if (should_do_sweeping_gc (pol_max >= policy_compact))
+ {
+ pol_max = policy_sweep;
+ }
+ else
+ {
+ if (pol_max == policy_sweep)
+ pol_max = policy_compact;
+ }
}
+#endif //GC_CONFIG_DRIVEN
for (i = 0; i < n_heaps; i++)
{
@@ -21944,7 +22199,7 @@ void gc_heap::plan_phase (int condemned_gen_number)
//copy the card and brick tables
if (g_card_table!= g_heaps[i]->card_table)
{
- g_heaps[i]->copy_brick_card_table (TRUE);
+ g_heaps[i]->copy_brick_card_table();
}
if (is_full_compacting_gc)
@@ -21971,6 +22226,23 @@ void gc_heap::plan_phase (int condemned_gen_number)
}
settings.demotion = ((demotion_high >= demotion_low) ? TRUE : FALSE);
+ if (settings.demotion)
+ get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
+
+#ifdef GC_CONFIG_DRIVEN
+ BOOL is_compaction_mandatory = FALSE;
+ int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
+ if (compact_reason >= 0)
+ is_compaction_mandatory = gc_heap_compact_reason_mandatory_p[compact_reason];
+
+ if (!is_compaction_mandatory)
+ {
+ if (should_do_sweeping_gc (should_compact))
+ should_compact = FALSE;
+ else
+ should_compact = TRUE;
+ }
+#endif //GC_CONFIG_DRIVEN
if (should_compact && (condemned_gen_number == max_generation))
{
@@ -22389,7 +22661,6 @@ BOOL gc_heap::ensure_gap_allocation (int condemned_gen_number)
heap_segment_committed (ephemeral_heap_segment))
{
if (!grow_heap_segment (ephemeral_heap_segment, start + size))
-
{
return FALSE;
}
@@ -24105,6 +24376,15 @@ void gc_heap::recover_saved_pinned_info()
{
mark* oldest_entry = oldest_pin();
oldest_entry->recover_plug_info();
+#ifdef GC_CONFIG_DRIVEN
+ if (oldest_entry->has_pre_plug_info() && oldest_entry->has_post_plug_info())
+ record_interesting_data_point (idp_pre_and_post_pin);
+ else if (oldest_entry->has_pre_plug_info())
+ record_interesting_data_point (idp_pre_pin);
+ else if (oldest_entry->has_post_plug_info())
+ record_interesting_data_point (idp_post_pin);
+#endif //GC_CONFIG_DRIVEN
+
deque_pinned_plug();
}
}
@@ -24130,7 +24410,7 @@ void gc_heap::compact_phase (int condemned_gen_number,
update_oldest_pinned_plug();
BOOL reused_seg = FALSE;
- int heap_expand_mechanism = gc_data_per_heap.get_mechanism (gc_heap_expand);
+ int heap_expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand);
if ((heap_expand_mechanism == expand_reuse_bestfit) ||
(heap_expand_mechanism == expand_reuse_normal))
{
@@ -24592,17 +24872,6 @@ void gc_heap::recover_bgc_settings()
}
}
-inline
-void gc_heap::save_bgc_data_per_heap()
-{
- if (!bgc_data_saved_p)
- {
- memset (&saved_bgc_data_per_heap, 0, sizeof (saved_bgc_data_per_heap));
- memcpy (&saved_bgc_data_per_heap, &gc_data_per_heap, sizeof(gc_data_per_heap));
- bgc_data_saved_p = TRUE;
- }
-}
-
void gc_heap::allow_fgc()
{
assert (bgc_thread == GetThread());
@@ -28747,7 +29016,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
//copy the card and brick tables
if (g_card_table!= card_table)
- copy_brick_card_table (TRUE);
+ copy_brick_card_table();
BOOL new_segment_p = (heap_segment_next (new_seg) == 0);
dprintf (2, ("new_segment_p %Ix", (size_t)new_segment_p));
@@ -28779,8 +29048,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
if (should_promote_ephemeral)
{
ephemeral_promotion = TRUE;
- gc_data_per_heap.clear_mechanism (gc_heap_expand);
- gc_data_per_heap.set_mechanism (gc_heap_expand, expand_new_seg_ep);
+ get_gc_data_per_heap()->set_mechanism (gc_heap_expand, expand_new_seg_ep);
dprintf (2, ("promoting ephemeral"));
save_ephemeral_generation_starts();
}
@@ -28885,6 +29153,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
dprintf (2, ("Demoting ephemeral segment"));
//demote the entire segment.
settings.demotion = TRUE;
+ get_gc_data_per_heap()->set_mechanism_bit (gc_demotion_bit);
demotion_low = heap_segment_mem (ephemeral_heap_segment);
demotion_high = heap_segment_reserved (ephemeral_heap_segment);
}
@@ -28894,6 +29163,7 @@ generation* gc_heap::expand_heap (int condemned_generation,
demotion_high = 0;
#ifndef MULTIPLE_HEAPS
settings.demotion = FALSE;
+ get_gc_data_per_heap()->clear_mechanism_bit (gc_demotion_bit);
#endif //!MULTIPLE_HEAPS
}
ptrdiff_t eph_size1 = total_ephemeral_size;
@@ -28939,6 +29209,11 @@ bool gc_heap::init_dynamic_data()
dd->time_clock = now;
}
+#ifdef GC_CONFIG_DRIVEN
+ if (heap_number == 0)
+ time_init = now;
+#endif //GC_CONFIG_DRIVEN
+
// get the registry setting for generation 0 size
size_t gen0size = GCHeap::GetValidGen0MaxSize(get_valid_segment_size());
@@ -29185,9 +29460,9 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd,
size_t free_space = generation_free_list_space (generation_of (gen_number));
// DTREVIEW - is min_gc_size really a good choice?
// on 64-bit this will almost always be true.
+ dprintf (GTC_LOG, ("frag: %Id, min: %Id", free_space, min_gc_size));
if (free_space > min_gc_size)
{
- dprintf (2, ("Detected excessive Fragmentation"));
settings.gen0_reduction_count = 2;
}
else
@@ -29203,7 +29478,6 @@ size_t gc_heap::desired_new_allocation (dynamic_data* dd,
max (min_gc_size, (max_size/3)));
}
}
-
}
size_t new_allocation_ret =
@@ -29404,9 +29678,7 @@ inline
gc_history_per_heap* gc_heap::get_gc_data_per_heap()
{
#ifdef BACKGROUND_GC
- return (settings.concurrent ?
- (bgc_data_saved_p ? &saved_bgc_data_per_heap : &gc_data_per_heap) :
- &gc_data_per_heap);
+ return (settings.concurrent ? &bgc_data_per_heap : &gc_data_per_heap);
#else
return &gc_data_per_heap;
#endif //BACKGROUND_GC
@@ -29557,51 +29829,49 @@ void gc_heap::decommit_ephemeral_segment_pages()
return;
}
- BOOL trim_p = FALSE;
size_t slack_space = heap_segment_committed (ephemeral_heap_segment) - heap_segment_allocated (ephemeral_heap_segment);
dynamic_data* dd = dynamic_data_of (0);
- if (settings.condemned_generation >= (max_generation-1))
- {
- trim_p = TRUE;
- size_t new_slack_space =
-#ifdef _WIN64
- max(min(min(get_valid_segment_size()/32, dd_max_size(dd)), (generation_size (max_generation) / 10)), dd_desired_allocation(dd));
-#else
-#ifdef FEATURE_CORECLR
- dd_desired_allocation (dd);
-#else
- dd_max_size (dd);
-#endif //FEATURE_CORECLR
-#endif //_WIN64
-
- slack_space = min (slack_space, new_slack_space);
- }
-
#ifndef MULTIPLE_HEAPS
size_t extra_space = (g_low_memory_status ? 0 : (512 * 1024));
size_t decommit_timeout = (g_low_memory_status ? 0 : GC_EPHEMERAL_DECOMMIT_TIMEOUT);
size_t ephemeral_elapsed = dd_time_clock(dd) - gc_last_ephemeral_decommit_time;
- if (dd_desired_allocation (dynamic_data_of(0)) > gc_gen0_desired_high)
+ if (dd_desired_allocation (dd) > gc_gen0_desired_high)
{
- gc_gen0_desired_high = dd_desired_allocation (dynamic_data_of(0)) + extra_space;
+ gc_gen0_desired_high = dd_desired_allocation (dd) + extra_space;
}
if (ephemeral_elapsed >= decommit_timeout)
{
slack_space = min (slack_space, gc_gen0_desired_high);
- gc_last_ephemeral_decommit_time = dd_time_clock(dynamic_data_of(0));
+ gc_last_ephemeral_decommit_time = dd_time_clock(dd);
gc_gen0_desired_high = 0;
}
#endif //!MULTIPLE_HEAPS
- size_t saved_slack_space = slack_space;
- size_t current_slack_space = ((slack_space < gen0_big_free_spaces) ? 0 : (slack_space - gen0_big_free_spaces));
- slack_space = current_slack_space;
+ if (settings.condemned_generation >= (max_generation-1))
+ {
+ size_t new_slack_space =
+#ifdef _WIN64
+ max(min(min(get_valid_segment_size()/32, dd_max_size(dd)), (generation_size (max_generation) / 10)), dd_desired_allocation(dd));
+#else
+#ifdef FEATURE_CORECLR
+ dd_desired_allocation (dd);
+#else
+ dd_max_size (dd);
+#endif //FEATURE_CORECLR
+#endif //_WIN64
+
+ slack_space = min (slack_space, new_slack_space);
+
+ size_t saved_slack_space = slack_space;
+ new_slack_space = ((slack_space < gen0_big_free_spaces) ? 0 : (slack_space - gen0_big_free_spaces));
+ slack_space = new_slack_space;
+ dprintf (1, ("ss: %Id->%Id", saved_slack_space, slack_space));
+ }
- dprintf (1, ("ss: %Id->%Id", saved_slack_space, slack_space));
decommit_heap_segment_pages (ephemeral_heap_segment, slack_space);
gc_history_per_heap* current_gc_data_per_heap = get_gc_data_per_heap();
@@ -29710,6 +29980,8 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
float fragmentation_burden = ( ((0 == fragmentation) || (0 == gen_sizes)) ? (0.0f) :
(float (fragmentation) / gen_sizes) );
+ dprintf (GTC_LOG, ("fragmentation: %Id (%d%%)", fragmentation, (int)(fragmentation_burden * 100.0)));
+
#ifdef STRESS_HEAP
// for pure GC stress runs we need compaction, for GC stress "mix"
// we need to ensure a better mix of compacting and sweeping collections
@@ -29740,12 +30012,14 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
{
should_compact = TRUE;
last_gc_before_oom = FALSE;
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_last_gc);
}
if (settings.reason == reason_induced_compacting)
{
dprintf (2, ("induced compacting GC"));
should_compact = TRUE;
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_induced_compacting);
}
dprintf (2, ("Fragmentation: %d Fragmentation burden %d%%",
@@ -29757,7 +30031,7 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
{
dprintf(GTC_LOG, ("compacting due to low ephemeral"));
should_compact = TRUE;
- gc_data_per_heap.set_mechanism (gc_compact, compact_low_ephemeral);
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_low_ephemeral);
}
}
@@ -29794,8 +30068,8 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
{
#endif // BACKGROUND_GC
assert (settings.concurrent == FALSE);
- dprintf(GTC_LOG,("compacting due to fragmentation"));
should_compact = TRUE;
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_frag);
#ifdef BACKGROUND_GC
}
#endif // BACKGROUND_GC
@@ -29817,6 +30091,7 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
{
dprintf(GTC_LOG,("compacting due to fragmentation in high memory"));
should_compact = TRUE;
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_high_mem_frag);
}
high_memory = TRUE;
}
@@ -29826,6 +30101,7 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
{
dprintf(GTC_LOG,("compacting due to fragmentation in very high memory"));
should_compact = TRUE;
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_vhigh_mem_frag);
}
high_memory = TRUE;
}
@@ -29840,7 +30116,7 @@ BOOL gc_heap::decide_on_compacting (int condemned_gen_number,
(ensure_gap_allocation (condemned_gen_number) == FALSE))
{
should_compact = TRUE;
- gc_data_per_heap.set_mechanism (gc_compact, compact_no_gaps);
+ get_gc_data_per_heap()->set_mechanism (gc_heap_compact, compact_no_gaps);
}
if (settings.condemned_generation == max_generation)
@@ -31710,9 +31986,9 @@ void gc_heap::descr_generations (BOOL begin_gc_p)
heap_segment* seg = generation_start_segment (gen);
while (seg && (seg != ephemeral_heap_segment))
{
- dprintf (GTC_LOG,("g%d: [%Ix %Ix[-%Ix[ (%Id) (%Id)",
- curr_gen_number,
- (size_t)heap_segment_mem (seg),
+ dprintf (GTC_LOG, ("g%d: [%Ix %Ix[-%Ix[ (%Id) (%Id)",
+ curr_gen_number,
+ (size_t)heap_segment_mem (seg),
(size_t)heap_segment_allocated (seg),
(size_t)heap_segment_committed (seg),
(size_t)(heap_segment_allocated (seg) - heap_segment_mem (seg)),
@@ -31722,7 +31998,7 @@ void gc_heap::descr_generations (BOOL begin_gc_p)
}
if (seg && (seg != generation_start_segment (gen)))
{
- dprintf (GTC_LOG,("g%d: [%Ix %Ix[",
+ dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
curr_gen_number,
(size_t)heap_segment_mem (seg),
(size_t)generation_allocation_start (generation_of (curr_gen_number-1))));
@@ -31731,7 +32007,7 @@ void gc_heap::descr_generations (BOOL begin_gc_p)
}
else if (seg)
{
- dprintf (GTC_LOG,("g%d: [%Ix %Ix[",
+ dprintf (GTC_LOG, ("g%d: [%Ix %Ix[",
curr_gen_number,
(size_t)generation_allocation_start (generation_of (curr_gen_number)),
(size_t)(((curr_gen_number == 0)) ?
@@ -32497,7 +32773,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
//copy the card and brick tables
if (g_card_table != g_heaps[i]->card_table)
{
- g_heaps[i]->copy_brick_card_table (FALSE);
+ g_heaps[i]->copy_brick_card_table();
}
}
@@ -32505,7 +32781,7 @@ gc_heap::verify_heap (BOOL begin_gc_p)
}
#else
if (g_card_table != card_table)
- copy_brick_card_table (FALSE);
+ copy_brick_card_table();
#endif //MULTIPLE_HEAPS
//verify that the generation structures makes sense
@@ -34395,21 +34671,27 @@ void gc_heap::do_pre_gc()
(ULONG)settings.reason);
#endif // STRESS_LOG
+#ifdef MULTIPLE_HEAPS
+ gc_heap* hp = g_heaps[0];
+#else
+ gc_heap* hp = 0;
+#endif //MULTIPLE_HEAPS
+
#ifdef BACKGROUND_GC
- settings.b_state = current_bgc_state;
+ settings.b_state = hp->current_bgc_state;
#endif //BACKGROUND_GC
#ifdef BACKGROUND_GC
dprintf (1, ("*GC* %d(gen0:%d)(%d)(%s)(%d)",
VolatileLoad(&settings.gc_index),
- dd_collection_count (dynamic_data_of (0)),
+ dd_collection_count (hp->dynamic_data_of (0)),
settings.condemned_generation,
(settings.concurrent ? "BGC" : (recursive_gc_sync::background_running_p() ? "FGC" : "NGC")),
- VolatileLoad(&current_bgc_state)));
+ settings.b_state));
#else
dprintf (1, ("*GC* %d(gen0:%d)(%d)",
VolatileLoad(&settings.gc_index),
- dd_collection_count (dynamic_data_of (0)),
+ dd_collection_count(hp->dynamic_data_of(0)),
settings.condemned_generation));
#endif //BACKGROUND_GC
@@ -34457,6 +34739,104 @@ void gc_heap::do_pre_gc()
#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING
}
+#ifdef GC_CONFIG_DRIVEN
+void gc_heap::record_interesting_info_per_heap()
+{
+ // datapoints are always from the last blocking GC so don't record again
+ // for BGCs.
+ if (!(settings.concurrent))
+ {
+ for (int i = 0; i < max_idp_count; i++)
+ {
+ interesting_data_per_heap[i] += interesting_data_per_gc[i];
+ }
+ }
+
+ int compact_reason = get_gc_data_per_heap()->get_mechanism (gc_heap_compact);
+ if (compact_reason >= 0)
+ (compact_reasons_per_heap[compact_reason])++;
+ int expand_mechanism = get_gc_data_per_heap()->get_mechanism (gc_heap_expand);
+ if (expand_mechanism >= 0)
+ (expand_mechanisms_per_heap[expand_mechanism])++;
+
+ for (int i = 0; i < max_gc_mechanism_bits_count; i++)
+ {
+ if (get_gc_data_per_heap()->is_mechanism_bit_set ((gc_mechanism_bit_per_heap)i))
+ (interesting_mechanism_bits_per_heap[i])++;
+ }
+
+ // h# | GC | gen | C | EX | NF | BF | ML | DM || PreS | PostS | Merge | Conv | Pre | Post | PrPo | PreP | PostP |
+ cprintf (("%2d | %6d | %1d | %1s | %2s | %2s | %2s | %2s | %2s || %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id | %5Id |",
+ heap_number,
+ settings.gc_index,
+ settings.condemned_generation,
+ // TEMP - I am just doing this for wks GC 'cuase I wanna see the pattern of doing C/S GCs.
+ (settings.compaction ? (((compact_reason >= 0) && gc_heap_compact_reason_mandatory_p[compact_reason]) ? "M" : "W") : ""), // compaction
+ ((expand_mechanism >= 0)? "X" : ""), // EX
+ ((expand_mechanism == expand_reuse_normal) ? "X" : ""), // NF
+ ((expand_mechanism == expand_reuse_bestfit) ? "X" : ""), // BF
+ (get_gc_data_per_heap()->is_mechanism_bit_set (gc_mark_list_bit) ? "X" : ""), // ML
+ (get_gc_data_per_heap()->is_mechanism_bit_set (gc_demotion_bit) ? "X" : ""), // DM
+ interesting_data_per_gc[idp_pre_short],
+ interesting_data_per_gc[idp_post_short],
+ interesting_data_per_gc[idp_merged_pin],
+ interesting_data_per_gc[idp_converted_pin],
+ interesting_data_per_gc[idp_pre_pin],
+ interesting_data_per_gc[idp_post_pin],
+ interesting_data_per_gc[idp_pre_and_post_pin],
+ interesting_data_per_gc[idp_pre_short_padded],
+ interesting_data_per_gc[idp_post_short_padded]));
+}
+
+void gc_heap::record_global_mechanisms()
+{
+ for (int i = 0; i < max_global_mechanisms_count; i++)
+ {
+ if (gc_data_global.get_mechanism_p ((gc_global_mechanism_p)i))
+ {
+ ::record_global_mechanism (i);
+ }
+ }
+}
+
+BOOL gc_heap::should_do_sweeping_gc (BOOL compact_p)
+{
+ if (!compact_ratio)
+ return (!compact_p);
+
+ size_t compact_count = compact_or_sweep_gcs[0];
+ size_t sweep_count = compact_or_sweep_gcs[1];
+
+ size_t total_count = compact_count + sweep_count;
+ BOOL should_compact = compact_p;
+ if (total_count > 3)
+ {
+ if (compact_p)
+ {
+ int temp_ratio = (int)((compact_count + 1) * 100 / (total_count + 1));
+ if (temp_ratio > compact_ratio)
+ {
+ // cprintf (("compact would be: %d, total_count: %d, ratio would be %d%% > target\n",
+ // (compact_count + 1), (total_count + 1), temp_ratio));
+ should_compact = FALSE;
+ }
+ }
+ else
+ {
+ int temp_ratio = (int)((sweep_count + 1) * 100 / (total_count + 1));
+ if (temp_ratio > (100 - compact_ratio))
+ {
+ // cprintf (("sweep would be: %d, total_count: %d, ratio would be %d%% > target\n",
+ // (sweep_count + 1), (total_count + 1), temp_ratio));
+ should_compact = TRUE;
+ }
+ }
+ }
+
+ return !should_compact;
+}
+#endif //GC_CONFIG_DRIVEN
+
void gc_heap::do_post_gc()
{
if (!settings.concurrent)
@@ -34473,6 +34853,12 @@ void gc_heap::do_post_gc()
#endif //COUNT_CYCLES
#endif //TRACE_GC
+#ifdef MULTIPLE_HEAPS
+ gc_heap* hp = g_heaps[0];
+#else
+ gc_heap* hp = 0;
+#endif //MULTIPLE_HEAPS
+
GCToEEInterface::GcDone(settings.condemned_generation);
#ifdef GC_PROFILING
@@ -34483,11 +34869,10 @@ void gc_heap::do_post_gc()
}
#endif // GC_PROFILING
-
//dprintf (1, (" ****end of Garbage Collection**** %d(gen0:%d)(%d)",
dprintf (1, ("*EGC* %d(gen0:%d)(%d)(%s)",
VolatileLoad(&settings.gc_index),
- dd_collection_count (dynamic_data_of (0)),
+ dd_collection_count(hp->dynamic_data_of(0)),
settings.condemned_generation,
(settings.concurrent ? "BGC" : "GC")));
@@ -34504,6 +34889,24 @@ void gc_heap::do_post_gc()
(ULONG)settings.condemned_generation,
(ULONG)settings.reason);
#endif // STRESS_LOG
+
+#ifdef GC_CONFIG_DRIVEN
+ if (!settings.concurrent)
+ {
+ if (settings.compaction)
+ (compact_or_sweep_gcs[0])++;
+ else
+ (compact_or_sweep_gcs[1])++;
+ }
+
+#ifdef MULTIPLE_HEAPS
+ for (int i = 0; i < n_heaps; i++)
+ g_heaps[i]->record_interesting_info_per_heap();
+#else
+ record_interesting_info_per_heap();
+#endif //MULTIPLE_HEAPS
+ record_global_mechanisms();
+#endif //GC_CONFIG_DRIVEN
}
unsigned GCHeap::GetGcCount()
diff --git a/src/gc/gc.h b/src/gc/gc.h
index f571cbcbb4..4f21d535b7 100644
--- a/src/gc/gc.h
+++ b/src/gc/gc.h
@@ -109,6 +109,11 @@ class GCHeap;
GPTR_DECL(GCHeap, g_pGCHeap);
+#ifdef GC_CONFIG_DRIVEN
+#define MAX_GLOBAL_GC_MECHANISMS_COUNT 6
+GARY_DECL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
+#endif //GC_CONFIG_DRIVEN
+
#ifndef DACCESS_COMPILE
extern "C" {
#endif
@@ -344,6 +349,10 @@ void record_changed_seg (BYTE* start, BYTE* end,
bgc_state current_bgc_state,
changed_seg_state changed_state);
+#ifdef GC_CONFIG_DRIVEN
+void record_global_mechanism (int mech_index);
+#endif //GC_CONFIG_DRIVEN
+
//constants for the flags parameter to the gc call back
#define GC_CALL_INTERIOR 0x1
@@ -377,19 +386,23 @@ public:
#endif
}
-#ifndef CLR_STANDALONE_BINDER
- static BOOL IsGCHeapInitialized()
- {
- LIMITED_METHOD_CONTRACT;
+#ifndef CLR_STANDALONE_BINDER
- return (g_pGCHeap != NULL);
- }
+#ifndef DACCESS_COMPILE
static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE)
{
WRAPPER_NO_CONTRACT;
return (IsGCHeapInitialized() ? GetGCHeap()->IsGCInProgressHelper(bConsiderGCStart) : false);
}
+#endif
+
+ static BOOL IsGCHeapInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (g_pGCHeap != NULL);
+ }
static void WaitForGCCompletion(BOOL bConsiderGCStart = FALSE)
{
diff --git a/src/gc/gccommon.cpp b/src/gc/gccommon.cpp
index b7686056ae..6982d537f5 100644
--- a/src/gc/gccommon.cpp
+++ b/src/gc/gccommon.cpp
@@ -28,6 +28,10 @@ GPTR_IMPL(DWORD,g_card_table);
GPTR_IMPL_INIT(BYTE,g_lowest_address,0);
GPTR_IMPL_INIT(BYTE,g_highest_address,0);
+#ifdef GC_CONFIG_DRIVEN
+GARY_IMPL(size_t, gc_global_mechanisms, MAX_GLOBAL_GC_MECHANISMS_COUNT);
+#endif //GC_CONFIG_DRIVEN
+
#ifndef DACCESS_COMPILE
BYTE* g_ephemeral_low = (BYTE*)1;
@@ -41,6 +45,13 @@ BYTE* g_shadow_lowest_address = NULL;
VOLATILE(LONG) m_GCLock = -1;
+#ifdef GC_CONFIG_DRIVEN
+void record_global_mechanism (int mech_index)
+{
+ (gc_global_mechanisms[mech_index])++;
+}
+#endif //GC_CONFIG_DRIVEN
+
LONG g_bLowMemoryFromHost = 0;
#ifdef WRITE_BARRIER_CHECK
diff --git a/src/gc/gcpriv.h b/src/gc/gcpriv.h
index 7bdd37ced8..aa1e7db0cc 100644
--- a/src/gc/gcpriv.h
+++ b/src/gc/gcpriv.h
@@ -161,6 +161,11 @@ inline void FATAL_GC_ERROR()
/* End of optional features */
+#ifdef GC_CONFIG_DRIVEN
+void GCLogConfig (const char *fmt, ... );
+#define cprintf(x) {GCLogConfig x;}
+#endif //GC_CONFIG_DRIVEN
+
#ifdef _DEBUG
#define TRACE_GC
#endif
@@ -356,21 +361,16 @@ public:
#ifdef SIMPLE_DPRINTF
//#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}}
-void LogValist(const char *fmt, va_list args);
void GCLog (const char *fmt, ... );
//#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}}
//#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}}
//#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}}
//#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}}
-//#define dprintf(l,x) {if (l==GTC_LOG) {GCLog x;}}
-//#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == 1234)) ) {GCLog x;}}
-//#define dprintf(l,x) {if ((l <= 1) || (l == 2222)) {GCLog x;}}
+//#define dprintf(l,x) {if ((l == 1) || (l == 2222)) {GCLog x;}}
#define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}}
-//#define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG) ||(l == DT_LOG_0)) {GCLog x;}}
//#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}}
//#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}}
//#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}}
-
#else //SIMPLE_DPRINTF
// The GCTrace output goes to stdout by default but can get sent to the stress log or the logfile if the
@@ -408,7 +408,6 @@ struct GCDebugSpinLock {
: lock(-1), holding_thread((Thread*) -1)
{
}
-
};
typedef GCDebugSpinLock GCSpinLock;
@@ -572,6 +571,7 @@ public:
int gen0_reduction_count;
BOOL should_lock_elevation;
int elevation_locked_count;
+ BOOL elevation_reduced;
BOOL minimal_gc;
gc_reason reason;
gc_pause_mode pause_mode;
@@ -731,9 +731,13 @@ class alloc_list
{
BYTE* head;
BYTE* tail;
- size_t damage_count;
+ size_t damage_count;
public:
+#ifdef FL_VERIFICATION
+ size_t item_count;
+#endif //FL_VERIFICATION
+
BYTE*& alloc_list_head () { return head;}
BYTE*& alloc_list_tail () { return tail;}
size_t& alloc_list_damage_count(){ return damage_count; }
@@ -1115,6 +1119,26 @@ struct no_gc_region_info
BOOL minimal_gc_p;
};
+// if you change these, make sure you update them for sos (strike.cpp) as well.
+//
+// !!!NOTE!!!
+// Right now I am only recording data from blocking GCs. When recording from BGC,
+// it should have its own copy just like gc_data_per_heap.
+// for BGCs we will have a very different set of datapoints to record.
+enum interesting_data_point
+{
+ idp_pre_short = 0,
+ idp_post_short = 1,
+ idp_merged_pin = 2,
+ idp_converted_pin = 3,
+ idp_pre_pin = 4,
+ idp_post_pin = 5,
+ idp_pre_and_post_pin = 6,
+ idp_pre_short_padded = 7,
+ idp_post_short_padded = 8,
+ max_idp_count
+};
+
//class definition of the internal class
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
extern void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw);
@@ -1284,10 +1308,10 @@ public:
BYTE* pad_for_alignment_large (BYTE* newAlloc, int requiredAlignment, size_t size);
#endif // FEATURE_STRUCTALIGN
- PER_HEAP
+ PER_HEAP_ISOLATED
void do_pre_gc();
- PER_HEAP
+ PER_HEAP_ISOLATED
void do_post_gc();
PER_HEAP
@@ -1299,6 +1323,9 @@ public:
PER_HEAP
int garbage_collect (int n);
+ PER_HEAP
+ void init_records();
+
static
DWORD* make_card_table (BYTE* start, BYTE* end);
@@ -1758,13 +1785,13 @@ protected:
void copy_brick_card_range (BYTE* la, DWORD* old_card_table,
short* old_brick_table,
heap_segment* seg,
- BYTE* start, BYTE* end, BOOL heap_expand);
+ BYTE* start, BYTE* end);
PER_HEAP
void init_brick_card_range (heap_segment* seg);
PER_HEAP
void copy_brick_card_table_l_heap ();
PER_HEAP
- void copy_brick_card_table(BOOL heap_expand);
+ void copy_brick_card_table();
PER_HEAP
void clear_brick_table (BYTE* from, BYTE* end);
PER_HEAP
@@ -2143,6 +2170,18 @@ protected:
PER_HEAP
void plan_phase (int condemned_gen_number);
+ PER_HEAP
+ void record_interesting_data_point (interesting_data_point idp);
+
+#ifdef GC_CONFIG_DRIVEN
+ PER_HEAP
+ void record_interesting_info_per_heap();
+ PER_HEAP_ISOLATED
+ void record_global_mechanisms();
+ PER_HEAP_ISOLATED
+ BOOL should_do_sweeping_gc (BOOL compact_p);
+#endif //GC_CONFIG_DRIVEN
+
#ifdef FEATURE_LOH_COMPACTION
// plan_loh can allocate memory so it can fail. If it fails, we will
// fall back to sweeping.
@@ -2935,6 +2974,11 @@ public:
PER_HEAP
size_t gen0_big_free_spaces;
+#ifdef SHORT_PLUGS
+ PER_HEAP_ISOLATED
+ float short_plugs_pad_ratio;
+#endif //SHORT_PLUGS
+
#ifdef _WIN64
PER_HEAP_ISOLATED
size_t youngest_gen_desired_th;
@@ -3070,10 +3114,7 @@ protected:
gc_mechanisms saved_bgc_settings;
PER_HEAP
- gc_history_per_heap saved_bgc_data_per_heap;
-
- PER_HEAP
- BOOL bgc_data_saved_p;
+ gc_history_per_heap bgc_data_per_heap;
PER_HEAP
BOOL bgc_thread_running; // gc thread is its main loop
@@ -3421,7 +3462,11 @@ protected:
alloc_list loh_alloc_list[NUM_LOH_ALIST-1];
#define NUM_GEN2_ALIST (12)
-#define BASE_GEN2_ALIST (1*64)
+#ifdef _WIN64
+#define BASE_GEN2_ALIST (1*256)
+#else
+#define BASE_GEN2_ALIST (1*128)
+#endif //_WIN64
PER_HEAP
alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1];
@@ -3521,6 +3566,36 @@ protected:
PER_HEAP_ISOLATED
size_t eph_gen_starts_size;
+#ifdef GC_CONFIG_DRIVEN
+ PER_HEAP_ISOLATED
+ size_t time_init;
+
+ PER_HEAP_ISOLATED
+ size_t time_since_init;
+
+ // 0 stores compacting GCs;
+ // 1 stores sweeping GCs;
+ PER_HEAP_ISOLATED
+ size_t compact_or_sweep_gcs[2];
+
+ PER_HEAP
+ size_t interesting_data_per_gc[max_idp_count];
+
+#ifdef MULTIPLE_HEAPS
+ PER_HEAP
+ size_t interesting_data_per_heap[max_idp_count];
+
+ PER_HEAP
+ size_t compact_reasons_per_heap[max_compact_reasons_count];
+
+ PER_HEAP
+ size_t expand_mechanisms_per_heap[max_expand_mechanisms_count];
+
+ PER_HEAP
+ size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count];
+#endif //MULTIPLE_HEAPS
+#endif //GC_CONFIG_DRIVEN
+
PER_HEAP
BOOL ro_segments_in_range;
@@ -4042,8 +4117,10 @@ size_t generation_unusable_fragmentation (generation* inst)
#define plug_skew sizeof(ObjHeader)
#define min_obj_size (sizeof(BYTE*)+plug_skew+sizeof(size_t))//syncblock + vtable+ first field
-#define min_free_list (sizeof(BYTE*)+min_obj_size) //Need one slot more
//Note that this encodes the fact that plug_skew is a multiple of BYTE*.
+// We always use USE_PADDING_TAIL when fitting so items on the free list should be
+// twice the min_obj_size.
+#define min_free_list (2*min_obj_size)
struct plug
{
BYTE * skew[plug_skew / sizeof(BYTE *)];
@@ -4259,6 +4336,13 @@ extern "C" {
GARY_DECL(generation,generation_table,NUMBERGENERATIONS+1);
+#ifdef GC_CONFIG_DRIVEN
+GARY_DECL(size_t, interesting_data_per_heap, max_idp_count);
+GARY_DECL(size_t, compact_reasons_per_heap, max_compact_reasons_count);
+GARY_DECL(size_t, expand_mechanisms_per_heap, max_expand_mechanisms_count);
+GARY_DECL(size_t, interesting_mechanism_bits_per_heap, max_gc_mechanism_bits_count);
+#endif //GC_CONFIG_DRIVEN
+
#ifndef DACCESS_COMPILE
}
#endif //!DACCESS_COMPILE
diff --git a/src/gc/gcrecord.h b/src/gc/gcrecord.h
index 9128ef8d26..daf0d25b3a 100644
--- a/src/gc/gcrecord.h
+++ b/src/gc/gcrecord.h
@@ -203,11 +203,13 @@ struct maxgen_size_increase
// we'll record the can_expand_into_p result here.
enum gc_heap_expand_mechanism
{
- expand_reuse_normal,
- expand_reuse_bestfit,
- expand_new_seg_ep, // new seg with ephemeral promotion
- expand_new_seg,
- expand_no_memory // we can't get a new seg.
+ expand_reuse_normal = 0,
+ expand_reuse_bestfit = 1,
+ expand_new_seg_ep = 2, // new seg with ephemeral promotion
+ expand_new_seg = 3,
+ expand_no_memory = 4, // we can't get a new seg.
+ expand_next_full_gc = 5,
+ max_expand_mechanisms_count = 6
};
#ifdef DT_LOG
@@ -217,43 +219,85 @@ static char* str_heap_expand_mechanisms[] =
"reused seg with best fit",
"expand promoting eph",
"expand with a new seg",
- "no memory for a new seg"
+ "no memory for a new seg",
+ "expand in next full GC"
};
#endif //DT_LOG
-enum gc_compact_reason
+enum gc_heap_compact_reason
{
- compact_low_ephemeral,
- compact_high_frag,
- compact_no_gaps,
- compact_loh_forced
+ compact_low_ephemeral = 0,
+ compact_high_frag = 1,
+ compact_no_gaps = 2,
+ compact_loh_forced = 3,
+ compact_last_gc = 4,
+ compact_induced_compacting = 5,
+ compact_fragmented_gen0 = 6,
+ compact_high_mem_load = 7,
+ compact_high_mem_frag = 8,
+ compact_vhigh_mem_frag = 9,
+ compact_no_gc_mode = 10,
+ max_compact_reasons_count = 11
};
-#ifdef DT_LOG
-static char* str_compact_reasons[] =
+#ifndef DACCESS_COMPILE
+static BOOL gc_heap_compact_reason_mandatory_p[] =
{
- "low on ephemeral space",
- "high fragmetation",
- "couldn't allocate gaps",
- "user specfied compact LOH"
+ TRUE, //compact_low_ephemeral = 0,
+ FALSE, //compact_high_frag = 1,
+ TRUE, //compact_no_gaps = 2,
+ TRUE, //compact_loh_forced = 3,
+ TRUE, //compact_last_gc = 4
+ TRUE, //compact_induced_compacting = 5,
+ FALSE, //compact_fragmented_gen0 = 6,
+ FALSE, //compact_high_mem_load = 7,
+ TRUE, //compact_high_mem_frag = 8,
+ TRUE, //compact_vhigh_mem_frag = 9,
+ TRUE //compact_no_gc_mode = 10
};
-#endif //DT_LOG
+
+static BOOL gc_expand_mechanism_mandatory_p[] =
+{
+ FALSE, //expand_reuse_normal = 0,
+ TRUE, //expand_reuse_bestfit = 1,
+ FALSE, //expand_new_seg_ep = 2, // new seg with ephemeral promotion
+ TRUE, //expand_new_seg = 3,
+ FALSE, //expand_no_memory = 4, // we can't get a new seg.
+ TRUE //expand_next_full_gc = 5
+};
+#endif //!DACCESS_COMPILE
#ifdef DT_LOG
-static char* str_concurrent_compact_reasons[] =
+static char* str_heap_compact_reasons[] =
{
- "high fragmentation",
- "low on ephemeral space in concurrent marking"
+ "low on ephemeral space",
+ "high fragmetation",
+ "couldn't allocate gaps",
+ "user specfied compact LOH",
+ "last GC before OOM",
+ "induced compacting GC",
+ "fragmented gen0 (ephemeral GC)",
+ "high memory load (ephemeral GC)",
+ "high memory load and frag",
+ "very high memory load and frag",
+ "no gc mode"
};
#endif //DT_LOG
enum gc_mechanism_per_heap
{
gc_heap_expand,
- gc_compact,
+ gc_heap_compact,
max_mechanism_per_heap
};
+enum gc_mechanism_bit_per_heap
+{
+ gc_mark_list_bit = 0,
+ gc_demotion_bit = 1,
+ max_gc_mechanism_bits_count = 2
+};
+
#ifdef DT_LOG
struct gc_mechanism_descr
{
@@ -264,7 +308,7 @@ struct gc_mechanism_descr
static gc_mechanism_descr gc_mechanisms_descr[max_mechanism_per_heap] =
{
{"expanded heap ", str_heap_expand_mechanisms},
- {"compacted because of ", str_compact_reasons}
+ {"compacted because of ", str_heap_compact_reasons}
};
#endif //DT_LOG
@@ -281,26 +325,43 @@ public:
// The mechanisms data is compacted in the following way:
// most significant bit indicates if we did the operation.
- // the rest of the bits indicate the reason
+ // the rest of the bits indicate the reason/mechanism
// why we chose to do the operation. For example:
// if we did a heap expansion using best fit we'd have
// 0x80000002 for the gc_heap_expand mechanism.
// Only one value is possible for each mechanism - meaning the
// values are all exclusive
+ // TODO: for the config stuff I need to think more about how to represent this
+ // because we might want to know all reasons (at least all mandatory ones) for
+ // compact.
+ // TODO: no need to the MSB for this
DWORD mechanisms[max_mechanism_per_heap];
+ // Each bit in this DWORD represent if a mechanism was used or not.
+ DWORD machanism_bits;
+
DWORD heap_index;
size_t extra_gen0_committed;
- void set_mechanism (gc_mechanism_per_heap mechanism_per_heap, DWORD value)
+ void set_mechanism (gc_mechanism_per_heap mechanism_per_heap, DWORD value);
+
+ void set_mechanism_bit (gc_mechanism_bit_per_heap mech_bit)
{
- DWORD* mechanism = &mechanisms[mechanism_per_heap];
- *mechanism |= mechanism_mask;
- *mechanism |= (1 << value);
+ machanism_bits |= 1 << mech_bit;
}
- void clear_mechanism (gc_mechanism_per_heap mechanism_per_heap)
+ void clear_mechanism_bit (gc_mechanism_bit_per_heap mech_bit)
+ {
+ machanism_bits &= ~(1 << mech_bit);
+ }
+
+ BOOL is_mechanism_bit_set (gc_mechanism_bit_per_heap mech_bit)
+ {
+ return (machanism_bits & (1 << mech_bit));
+ }
+
+ void clear_mechanism(gc_mechanism_per_heap mechanism_per_heap)
{
DWORD* mechanism = &mechanisms[mechanism_per_heap];
*mechanism = 0;
@@ -327,12 +388,12 @@ public:
enum gc_global_mechanism_p
{
global_concurrent = 0,
- global_compaction,
- global_promotion,
- global_demotion,
- global_card_bundles,
- global_elevation,
- max_global_mechanism
+ global_compaction = 1,
+ global_promotion = 2,
+ global_demotion = 3,
+ global_card_bundles = 4,
+ global_elevation = 5,
+ max_global_mechanisms_count
};
struct gc_history_global
diff --git a/src/gc/handletable.cpp b/src/gc/handletable.cpp
index 59aa60a8f2..3bb232570e 100644
--- a/src/gc/handletable.cpp
+++ b/src/gc/handletable.cpp
@@ -803,18 +803,6 @@ void HndWriteBarrier(OBJECTHANDLE handle, OBJECTREF objref)
_ASSERTE (objref != NULL);
- // find out generation
- int generation = GCHeap::GetGCHeap()->WhichGeneration(value);
-
-#ifndef FEATURE_REDHAWK
- //OverlappedData need special treatment: because all user data pointed by it needs to be reported by this handle,
- //its age is consider to be min age of the user data, to be simple, we just make it 0
- if (HandleFetchType (handle) == HNDTYPE_ASYNCPINNED && objref->GetGCSafeMethodTable () == g_pOverlappedDataClass)
- {
- generation = 0;
- }
-#endif // !FEATURE_REDHAWK
-
// find the write barrier for this handle
BYTE *barrier = (BYTE *)((UINT_PTR)handle & HANDLE_SEGMENT_ALIGN_MASK);
@@ -838,16 +826,37 @@ void HndWriteBarrier(OBJECTHANDLE handle, OBJECTREF objref)
volatile BYTE * pClumpAge = barrier + offset;
// if this age is smaller than age of the clump, update the clump age
- if (*pClumpAge > (BYTE)generation)
+ if (*pClumpAge != 0) // Perf optimization: if clumpAge is 0, nothing more to do
{
- // We have to be careful here. HndWriteBarrier is not under any synchronization
- // Consider the scenario where 2 threads are hitting the line below at the same
- // time. Only one will win. If the winner has an older age than the loser, we
- // just created a potential GC hole (The clump will not be reporting the
- // youngest handle in the clump, thus GC may skip the clump). To fix this
- // we just set the clump age to 0, which means that whoever wins the race
- // results are the same, as GC will always look at the clump
- *pClumpAge = (BYTE)0;
+ // find out generation
+ int generation = GCHeap::GetGCHeap()->WhichGeneration(value);
+ UINT uType = HandleFetchType(handle);
+
+#ifndef FEATURE_REDHAWK
+ //OverlappedData need special treatment: because all user data pointed by it needs to be reported by this handle,
+ //its age is consider to be min age of the user data, to be simple, we just make it 0
+ if (uType == HNDTYPE_ASYNCPINNED && objref->GetGCSafeMethodTable () == g_pOverlappedDataClass)
+ {
+ generation = 0;
+ }
+#endif // !FEATURE_REDHAWK
+
+ if (uType == HNDTYPE_DEPENDENT)
+ {
+ generation = 0;
+ }
+
+ if (*pClumpAge > (BYTE) generation)
+ {
+ // We have to be careful here. HndWriteBarrier is not under any synchronization
+ // Consider the scenario where 2 threads are hitting the line below at the same
+ // time. Only one will win. If the winner has an older age than the loser, we
+ // just created a potential GC hole (The clump will not be reporting the
+ // youngest handle in the clump, thus GC may skip the clump). To fix this
+ // we just set the clump age to 0, which means that whoever wins the race
+ // results are the same, as GC will always look at the clump
+ *pClumpAge = (BYTE)0;
+ }
}
}
diff --git a/src/gc/handletablescan.cpp b/src/gc/handletablescan.cpp
index 37b23b52c7..8d9c77c4c7 100644
--- a/src/gc/handletablescan.cpp
+++ b/src/gc/handletablescan.cpp
@@ -904,12 +904,19 @@ void CALLBACK BlockResetAgeMapForBlocks(TableSegment *pSegment, UINT uBlock, UIN
#endif
}
-
-static void VerifyObject(_UNCHECKED_OBJECTREF *pValue, _UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj, BYTE minAge)
+static void VerifyObject(_UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj)
{
-#ifndef FEATURE_REDHAWK
+#ifdef FEATURE_REDHAWK
+ MethodTable* pMT = (MethodTable*)(obj->GetGCSafeMethodTable());
+ pMT->SanityCheck();
+#else
obj->ValidateHeap(from);
#endif // FEATURE_REDHAWK
+}
+
+static void VerifyObjectAndAge(_UNCHECKED_OBJECTREF *pValue, _UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj, BYTE minAge)
+{
+ VerifyObject(from, obj);
int thisAge = GCHeap::GetGCHeap()->WhichGeneration(obj);
@@ -947,7 +954,7 @@ static void VerifyObject(_UNCHECKED_OBJECTREF *pValue, _UNCHECKED_OBJECTREF from
* Also validates the objects themselves.
*
*/
-void BlockVerifyAgeMapForBlocksWorker(ULONG32 *pdwGen, ULONG32 dwClumpMask, ScanCallbackInfo *pInfo)
+void BlockVerifyAgeMapForBlocksWorker(ULONG32 *pdwGen, ULONG32 dwClumpMask, ScanCallbackInfo *pInfo, UINT uType)
{
WRAPPER_NO_CONTRACT;
@@ -963,7 +970,7 @@ void BlockVerifyAgeMapForBlocksWorker(ULONG32 *pdwGen, ULONG32 dwClumpMask, Scan
// loop over the clumps, scanning those that are identified by the mask
do
{
- // compute the last handle in this clump
+ // compute the last handle in this clump
_UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_CLUMP;
// if this clump should be scanned then scan it
@@ -975,7 +982,7 @@ void BlockVerifyAgeMapForBlocksWorker(ULONG32 *pdwGen, ULONG32 dwClumpMask, Scan
{
if (!HndIsNullOrDestroyedHandle(*pValue))
{
- VerifyObject(pValue, (*pValue), (*pValue), minAge);
+ VerifyObjectAndAge(pValue, (*pValue), (*pValue), minAge);
#ifndef FEATURE_REDHAWK
if ((*pValue)->GetGCSafeMethodTable() == g_pOverlappedDataClass)
{
@@ -984,7 +991,7 @@ void BlockVerifyAgeMapForBlocksWorker(ULONG32 *pdwGen, ULONG32 dwClumpMask, Scan
if (pOverlapped->m_userObject != NULL)
{
Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject);
- VerifyObject(pValue, (*pValue), pUserObject, minAge);
+ VerifyObjectAndAge(pValue, (*pValue), pUserObject, minAge);
if (pOverlapped->m_isArray)
{
ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject;
@@ -992,12 +999,27 @@ void BlockVerifyAgeMapForBlocksWorker(ULONG32 *pdwGen, ULONG32 dwClumpMask, Scan
SIZE_T num = pUserArrayObject->GetNumComponents();
for (SIZE_T i = 0; i < num; i ++)
{
- VerifyObject(pValue, pUserObject, pObj[i], minAge);
+ VerifyObjectAndAge(pValue, pUserObject, pObj[i], minAge);
}
}
}
}
-#endif // !FEATURE_REDHAWK
+#endif // !FEATURE_REDHAWK
+
+ if (uType == HNDTYPE_DEPENDENT)
+ {
+ PTR_LPARAM pUserData = HandleQuickFetchUserDataPointer((OBJECTHANDLE)pValue);
+
+ // if the handle carries user data, treat it as the dependent secondary object and verify it
+ if (pUserData)
+ {
+ _UNCHECKED_OBJECTREF pSecondary = (_UNCHECKED_OBJECTREF)(*pUserData);
+ if (pSecondary)
+ {
+ VerifyObject(pSecondary, pSecondary);
+ }
+ }
+ }
}
}
}
@@ -1011,7 +1033,6 @@ void BlockVerifyAgeMapForBlocksWorker(ULONG32 *pdwGen, ULONG32 dwClumpMask, Scan
} while (dwClumpMask);
}
-
/*
* BlockVerifyAgeMapForBlocks
*
@@ -1023,21 +1044,17 @@ void CALLBACK BlockVerifyAgeMapForBlocks(TableSegment *pSegment, UINT uBlock, UI
{
WRAPPER_NO_CONTRACT;
- // set up to update the specified blocks
- ULONG32 *pdwGen = (ULONG32 *)pSegment->rgGeneration + uBlock;
- ULONG32 *pdwGenLast = pdwGen + uCount;
-
- // loop over all the blocks, checking for eligible clumps as we go
- do
+ for (UINT u = 0; u < uCount; u++)
{
- BlockVerifyAgeMapForBlocksWorker(pdwGen, 0xFFFFFFFF, pInfo);
+ UINT uCur = (u + uBlock);
- // on to the next block's generation info
- pdwGen++;
+ ULONG32 *pdwGen = (ULONG32 *)pSegment->rgGeneration + uCur;
- } while (pdwGen < pdwGenLast);
-}
+ UINT uType = pSegment->rgBlockType[uCur];
+ BlockVerifyAgeMapForBlocksWorker(pdwGen, 0xFFFFFFFF, pInfo, uType);
+ }
+}
/*
* BlockLockBlocks
diff --git a/src/gc/objecthandle.cpp b/src/gc/objecthandle.cpp
index b62407a765..5816f529bc 100644
--- a/src/gc/objecthandle.cpp
+++ b/src/gc/objecthandle.cpp
@@ -1805,6 +1805,7 @@ void Ref_VerifyHandleTable(UINT condemned, UINT maxgen, ScanContext* sc)
#endif // FEATURE_COMINTEROP
HNDTYPE_ASYNCPINNED,
HNDTYPE_SIZEDREF,
+ HNDTYPE_DEPENDENT,
};
// verify these handles
diff --git a/src/inc/clrconfigvalues.h b/src/inc/clrconfigvalues.h
index 8f33e40c27..062ffb6e01 100644
--- a/src/inc/clrconfigvalues.h
+++ b/src/inc/clrconfigvalues.h
@@ -319,9 +319,12 @@ RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_StatsUpdatePeriod, W("StatsUpdatePeriod"),
RETAIL_CONFIG_STRING_INFO(UNSUPPORTED_SuspendTimeLog, W("SuspendTimeLog"), "Specifies the name of the log file for suspension statistics")
RETAIL_CONFIG_STRING_INFO(UNSUPPORTED_GCMixLog, W("GCMixLog"), "Specifies the name of the log file for GC mix statistics")
CONFIG_DWORD_INFO_DIRECT_ACCESS(INTERNAL_GCLatencyMode, W("GCLatencyMode"), "Specifies the GC latency mode - batch, interactive or low latency (note that the same thing can be specified via API which is the supported way)")
+RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCConfigLogEnabled, W("GCConfigLogEnabled"), 0, "Specifies if you want to turn on config logging in GC")
RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCLogEnabled, W("GCLogEnabled"), 0, "Specifies if you want to turn on logging in GC")
RETAIL_CONFIG_STRING_INFO(UNSUPPORTED_GCLogFile, W("GCLogFile"), "Specifies the name of the GC log file")
-RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCLogFileSize, W("GCLogFileSize"), 0, "Specifies the maximum GC log file size")
+RETAIL_CONFIG_STRING_INFO(UNSUPPORTED_GCConfigLogFile, W("GCConfigLogFile"), "Specifies the name of the GC config log file")
+RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCLogFileSize, W("GCLogFileSize"), 0, "Specifies the GC log file size")
+RETAIL_CONFIG_DWORD_INFO(UNSUPPORTED_GCCompactRatio, W("GCCompactRatio"), 0, "Specifies the ratio compacting GCs vs sweeping ")
RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(EXTERNAL_GCPollType, W("GCPollType"), "")
RETAIL_CONFIG_STRING_INFO_EX(EXTERNAL_NewGCCalc, W("NewGCCalc"), "", CLRConfig::REGUTIL_default)
RETAIL_CONFIG_DWORD_INFO_DIRECT_ACCESS(UNSUPPORTED_GCprnLvl, W("GCprnLvl"), "Specifies the maximum level of GC logging")
@@ -598,6 +601,8 @@ RETAIL_CONFIG_STRING_INFO(INTERNAL_WinMDPath, W("WinMDPath"), "Path for Windows
//
CONFIG_DWORD_INFO_EX(INTERNAL_LoaderHeapCallTracing, W("LoaderHeapCallTracing"), 0, "Loader heap troubleshooting", CLRConfig::REGUTIL_default)
RETAIL_CONFIG_DWORD_INFO(INTERNAL_CodeHeapReserveForJumpStubs, W("CodeHeapReserveForJumpStubs"), 2, "Percentage of code heap to reserve for jump stubs")
+RETAIL_CONFIG_DWORD_INFO(INTERNAL_NGenReserveForJumpStubs, W("NGenReserveForJumpStubs"), 0, "Percentage of ngen image size to reserve for jump stubs")
+RETAIL_CONFIG_DWORD_INFO(INTERNAL_BreakOnOutOfMemoryWithinRange, W("BreakOnOutOfMemoryWithinRange"), 0, "Break before out of memory within range exception is thrown")
//
// Log
diff --git a/src/inc/corerror.xml b/src/inc/corerror.xml
index e4ddd973b0..98d41ea845 100644
--- a/src/inc/corerror.xml
+++ b/src/inc/corerror.xml
@@ -3650,6 +3650,11 @@
<Comment> This call can't be completed safely because the runtime is not suspended </Comment>
</HRESULT>
-->
+
+<HRESULT NumericValue="0x80131382">
+ <SymbolicName>CORPROF_E_CALLBACK7_REQUIRED</SymbolicName>
+ <Comment> Profiler must implement ICorProfilerCallback7 interface for this call to be supported. </Comment>
+</HRESULT>
<HRESULT NumericValue="0x80131400">
<SymbolicName>SECURITY_E_XML_TO_ASN_ENCODING</SymbolicName>
diff --git a/src/inc/corprof.idl b/src/inc/corprof.idl
index 894a8850be..328fbe01e1 100644
--- a/src/inc/corprof.idl
+++ b/src/inc/corprof.idl
@@ -615,9 +615,11 @@ typedef enum
COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES = 0x00000001,
+ COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED = 0x00000002,
+
COR_PRF_HIGH_REQUIRE_PROFILE_IMAGE = 0,
- COR_PRF_HIGH_ALLOWABLE_AFTER_ATTACH = 0,
+ COR_PRF_HIGH_ALLOWABLE_AFTER_ATTACH = COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED,
// MONITOR_IMMUTABLE represents all flags that may only be set during initialization.
// Trying to change any of these flags elsewhere will result in a
@@ -2349,6 +2351,29 @@ interface ICorProfilerCallback6 : ICorProfilerCallback5
};
+[
+ object,
+ uuid(F76A2DBA-1D52-4539-866C-2AA518F9EFC3),
+ pointer_default(unique),
+ local
+]
+interface ICorProfilerCallback7 : ICorProfilerCallback6
+{
+ // This event is triggered whenever the symbol stream associated with an
+ // in-memory module is updated. Even when symbols are provided up-front in
+ // a call to the managed API Assembly.Load(byte[], byte[], ...) the runtime
+ // may not actually associate the symbolic data with the module until after
+ // the ModuleLoadFinished callback has occurred. This event provides a later
+ // opportunity to collect symbols for such modules.
+ //
+ // This event is controlled by the COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED
+ // event mask flag.
+ //
+ // Note: This event is not currently raised for symbols implicitly created or
+ // modified via Reflection.Emit APIs.
+ HRESULT ModuleInMemorySymbolsUpdated(ModuleID moduleId);
+}
+
/*
* COR_PRF_CODEGEN_FLAGS controls various flags and hooks for a specific
@@ -3699,6 +3724,62 @@ interface ICorProfilerInfo6 : ICorProfilerInfo5
[out] ICorProfilerMethodEnum** ppEnum);
};
+[
+ object,
+ uuid(9AEECC0D-63E0-4187-8C00-E312F503F663),
+ pointer_default(unique),
+ local
+]
+interface ICorProfilerInfo7 : ICorProfilerInfo6
+{
+ /*
+ * Applies the newly emitted Metadata.
+ *
+ * This method can be used to apply the newly defined metadata by IMetadataEmit::Define* methods
+ * to the module.
+ *
+ * If metadata changes are made after ModuleLoadFinished callback,
+ * it is required to call this method before using the new metadata
+ */
+ HRESULT ApplyMetaData(
+ [in] ModuleID moduleId);
+
+ /* Returns the length of an in-memory symbol stream
+ *
+ * If the module has in-memory symbols the length of the stream will
+ * be placed in pCountSymbolBytes. If the module doesn't have in-memory
+ * symbols, *pCountSymbolBytes = 0
+ *
+ * Returns S_OK if the length could be determined (even if it is 0)
+ *
+ * Note: The current implementation does not support reflection.emit.
+ * CORPROF_E_MODULE_IS_DYNAMIC will be returned in that case.
+ */
+ HRESULT GetInMemorySymbolsLength(
+ [in] ModuleID moduleId,
+ [out] DWORD* pCountSymbolBytes);
+
+ /* Reads bytes from an in-memory symbol stream
+ *
+ * This function attempts to read countSymbolBytes of data starting at offset
+ * symbolsReadOffset within the in-memory stream. The data will be copied into
+ * pSymbolBytes which is expected to have countSymbolBytes of space available.
+ * pCountSymbolsBytesRead contains the actual number of bytes read which
+ * may be less than countSymbolBytes if the end of the stream is reached.
+ *
+ * Returns S_OK if a non-zero number of bytes were read.
+ *
+ * Note: The current implementation does not support reflection.emit.
+ * CORPROF_E_MODULE_IS_DYNAMIC will be returned in that case.
+ */
+ HRESULT ReadInMemorySymbols(
+ [in] ModuleID moduleId,
+ [in] DWORD symbolsReadOffset,
+ [out] BYTE* pSymbolBytes,
+ [in] DWORD countSymbolBytes,
+ [out] DWORD* pCountSymbolBytesRead);
+
+};
/*
* This interface lets you iterate over methods in the runtime.
diff --git a/src/inc/dacprivate.h b/src/inc/dacprivate.h
index 087d89b2f1..5ec6e7fead 100644
--- a/src/inc/dacprivate.h
+++ b/src/inc/dacprivate.h
@@ -753,6 +753,61 @@ struct DacpOomData : ZeroInit<DacpOomData>
}
};
+// This is the value of max_idp_count in ndp\clr\src\vm\gcpriv.h
+#define NUM_GC_DATA_POINTS 9
+// These are from ndp\clr\src\vm\gcrecord.h
+#define MAX_COMPACT_REASONS_COUNT 11
+#define MAX_EXPAND_MECHANISMS_COUNT 6
+#define MAX_GC_MECHANISM_BITS_COUNT 2
+// This is from ndp\clr\src\vm\common.h
+#define MAX_GLOBAL_GC_MECHANISMS_COUNT 6
+struct DacpGCInterestingInfoData : ZeroInit<DacpGCInterestingInfoData>
+{
+ size_t interestingDataPoints[NUM_GC_DATA_POINTS];
+ size_t compactReasons[MAX_COMPACT_REASONS_COUNT];
+ size_t expandMechanisms[MAX_EXPAND_MECHANISMS_COUNT];
+ size_t bitMechanisms[MAX_GC_MECHANISM_BITS_COUNT];
+ size_t globalMechanisms[MAX_GLOBAL_GC_MECHANISMS_COUNT];
+
+ HRESULT RequestGlobal(ISOSDacInterface *sos)
+ {
+ HRESULT hr;
+ ISOSDacInterface3 *psos3 = NULL;
+ if (SUCCEEDED(hr = sos->QueryInterface(__uuidof(ISOSDacInterface3), (void**) &psos3)))
+ {
+ hr = psos3->GetGCGlobalMechanisms(globalMechanisms);
+ psos3->Release();
+ }
+ return hr;
+ }
+
+ HRESULT Request(ISOSDacInterface *sos)
+ {
+ HRESULT hr;
+ ISOSDacInterface3 *psos3 = NULL;
+ if (SUCCEEDED(hr = sos->QueryInterface(__uuidof(ISOSDacInterface3), (void**) &psos3)))
+ {
+ hr = psos3->GetGCInterestingInfoStaticData(this);
+ psos3->Release();
+ }
+ return hr;
+ }
+
+ // Use this for Server mode, as there are multiple heaps,
+ // and you need to pass a heap address in addr.
+ HRESULT Request(ISOSDacInterface *sos, CLRDATA_ADDRESS addr)
+ {
+ HRESULT hr;
+ ISOSDacInterface3 *psos3 = NULL;
+ if (SUCCEEDED(hr = sos->QueryInterface(__uuidof(ISOSDacInterface3), (void**) &psos3)))
+ {
+ hr = psos3->GetGCInterestingInfoData(addr, this);
+ psos3->Release();
+ }
+ return hr;
+ }
+};
+
struct DacpGcHeapAnalyzeData
: ZeroInit<DacpGcHeapAnalyzeData>
{
diff --git a/src/inc/dacvars.h b/src/inc/dacvars.h
index 560d63a795..4195b863aa 100644
--- a/src/inc/dacvars.h
+++ b/src/inc/dacvars.h
@@ -201,6 +201,14 @@ DEFINE_DACVAR(ULONG, PTR_BYTE, dac__g_highest_address, ::g_highest_address)
DEFINE_DACVAR(ULONG, GCHeap, dac__g_pGCHeap, ::g_pGCHeap)
+#ifdef GC_CONFIG_DRIVEN
+DEFINE_DACVAR_NO_DUMP(ULONG, SIZE_T, dac__interesting_data_per_heap, ::interesting_data_per_heap)
+DEFINE_DACVAR_NO_DUMP(ULONG, SIZE_T, dac__compact_reasons_per_heap, ::compact_reasons_per_heap)
+DEFINE_DACVAR_NO_DUMP(ULONG, SIZE_T, dac__expand_mechanisms_per_heap, ::expand_mechanisms_per_heap)
+DEFINE_DACVAR_NO_DUMP(ULONG, SIZE_T, dac__interesting_mechanism_bits_per_heap, ::interesting_mechanism_bits_per_heap)
+DEFINE_DACVAR_NO_DUMP(ULONG, SIZE_T, dac__gc_global_mechanisms, ::gc_global_mechanisms)
+#endif //GC_CONFIG_DRIVEN
+
DEFINE_DACVAR(ULONG, UNKNOWN_POINTER_TYPE, dac__g_pThinLockThreadIdDispenser, ::g_pThinLockThreadIdDispenser)
DEFINE_DACVAR(ULONG, UNKNOWN_POINTER_TYPE, dac__g_pModuleIndexDispenser, ::g_pModuleIndexDispenser)
DEFINE_DACVAR(ULONG, UNKNOWN_POINTER_TYPE, dac__g_pObjectClass, ::g_pObjectClass)
diff --git a/src/inc/profilepriv.inl b/src/inc/profilepriv.inl
index 8e0e4ffa21..d8997fd8b6 100644
--- a/src/inc/profilepriv.inl
+++ b/src/inc/profilepriv.inl
@@ -722,6 +722,21 @@ inline BOOL CORProfilerAddsAssemblyReferences()
((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES));
}
+inline BOOL CORProfilerInMemorySymbolsUpdatesEnabled()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CANNOT_TAKE_LOCK;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ return (CORProfilerPresent() &&
+ ((&g_profControlBlock)->dwEventMaskHigh & COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED));
+}
+
#if defined(PROFILING_SUPPORTED) && !defined(CROSSGEN_COMPILE)
#if defined(FEATURE_PROFAPI_ATTACH_DETACH)
diff --git a/src/inc/simplerhash.h b/src/inc/simplerhash.h
index 37b0b8fb0b..538eacf059 100644
--- a/src/inc/simplerhash.h
+++ b/src/inc/simplerhash.h
@@ -467,10 +467,49 @@ struct LargePrimitiveKeyFuncs: public KeyFuncsDefEquals<T>
{
static unsigned GetHashCode(const T val)
{
- UINT64 asUINT64 = static_cast<UINT64>(val);
- unsigned res = asUINT64 >> 32;
- res = res ^ static_cast<unsigned>(asUINT64 & ((((UINT64)1) << 32) - 1));
- return res;
+ // A static cast when T is a float or a double converts the value (e.g. 0.25 converts to 0)
+ //
+ // Instead we want to use all of the bits of a float to create the hash value
+ // So we cast the address of val to a pointer to an equivalent sized unsigned int
+ // This allows us to read the actual bit representation of a float type
+ //
+ // We can't read beyond the end of val, so we use sizeof(T) to determine
+ // exactly how many bytes to read
+ //
+ if (sizeof(T) == 8)
+ {
+ // cast &val to (UINT64 *) then deref to get the bits
+ UINT64 asUINT64 = *(reinterpret_cast<const UINT64 *>(&val));
+
+ // Get the upper and lower 32-bit values from the 64-bit value
+ UINT32 upper32 = static_cast<UINT32> (asUINT64 >> 32);
+ UINT32 lower32 = static_cast<UINT32> (asUINT64 & 0xFFFFFFFF);
+
+ // Exclusive-Or the upper32 and the lower32 values
+ return static_cast<unsigned>(upper32 ^ lower32);
+
+ }
+ else if (sizeof(T) == 4)
+ {
+ // cast &val to (UINT32 *) then deref to get the bits
+ UINT32 asUINT32 = *(reinterpret_cast<const UINT32 *>(&val));
+
+ // Just return the 32-bit value
+ return static_cast<unsigned>(asUINT32);
+ }
+ else if ((sizeof(T) == 2) || (sizeof(T) == 1))
+ {
+ // For small sizes we must have an integer type
+ // so we can just use the static_cast.
+ //
+ return static_cast<unsigned>(val);
+ }
+ else
+ {
+ // Only support Hashing for types that are 8,4,2 or 1 bytes in size
+ assert(!"Unsupported size");
+ return static_cast<unsigned>(val); // compile-time error here when we have an illegal size
+ }
}
};
diff --git a/src/inc/sospriv.idl b/src/inc/sospriv.idl
index 9ef6cc2ffa..48c76cbf87 100644
--- a/src/inc/sospriv.idl
+++ b/src/inc/sospriv.idl
@@ -336,3 +336,15 @@ interface ISOSDacInterface2 : IUnknown
HRESULT GetObjectExceptionData(CLRDATA_ADDRESS objAddr, struct DacpExceptionObjectData *data);
HRESULT IsRCWDCOMProxy(CLRDATA_ADDRESS rcwAddr, BOOL* isDCOMProxy);
};
+
+[
+ object,
+ local,
+ uuid(B08C5CDC-FD8A-49C5-AB38-5FEEF35235B4)
+]
+interface ISOSDacInterface3 : IUnknown
+{
+ HRESULT GetGCInterestingInfoData(CLRDATA_ADDRESS interestingInfoAddr, struct DacpGCInterestingInfoData *data);
+ HRESULT GetGCInterestingInfoStaticData(struct DacpGCInterestingInfoData *data);
+ HRESULT GetGCGlobalMechanisms(size_t* globalMechanisms);
+}; \ No newline at end of file
diff --git a/src/inc/switches.h b/src/inc/switches.h
index b298b5563b..dd4da272c0 100644
--- a/src/inc/switches.h
+++ b/src/inc/switches.h
@@ -20,6 +20,8 @@
#define VERIFY_HEAP
#endif
+#define GC_CONFIG_DRIVEN
+
// define this to test data safety for the DAC. See code:DataTest::TestDataSafety.
#define TEST_DATA_CONSISTENCY
diff --git a/src/md/winmd/inc/adapter.h b/src/md/winmd/inc/adapter.h
index 748d65d473..111c9bd94d 100644
--- a/src/md/winmd/inc/adapter.h
+++ b/src/md/winmd/inc/adapter.h
@@ -633,7 +633,11 @@ public:
case FrameworkAssembly_SystemRuntimeWindowsRuntimeUIXaml:
return "System.Runtime.WindowsRuntime.UI.Xaml";
case FrameworkAssembly_SystemNumericsVectors:
+#ifdef FEATURE_CORECLR
return "System.Numerics.Vectors";
+#else
+ return "System.Numerics";
+#endif
default:
_ASSERTE(!"Invalid AssemblyRef token!");
return NULL;
diff --git a/src/mscorlib/src/System/AppContext/AppContext.cs b/src/mscorlib/src/System/AppContext/AppContext.cs
index d1416f7af1..feef21b787 100644
--- a/src/mscorlib/src/System/AppContext/AppContext.cs
+++ b/src/mscorlib/src/System/AppContext/AppContext.cs
@@ -26,7 +26,9 @@ namespace System
#endif
get
{
- return AppDomain.CurrentDomain.BaseDirectory;
+ // The value of APP_CONTEXT_BASE_DIRECTORY key has to be a string and it is not allowed to be any other type.
+ // Otherwise the caller will get invalid cast exception
+ return (string) AppDomain.CurrentDomain.GetData("APP_CONTEXT_BASE_DIRECTORY") ?? AppDomain.CurrentDomain.BaseDirectory;
}
}
diff --git a/src/mscorlib/src/System/Collections/Concurrent/ConcurrentDictionary.cs b/src/mscorlib/src/System/Collections/Concurrent/ConcurrentDictionary.cs
index 38085b8d4f..f15cb00198 100644
--- a/src/mscorlib/src/System/Collections/Concurrent/ConcurrentDictionary.cs
+++ b/src/mscorlib/src/System/Collections/Concurrent/ConcurrentDictionary.cs
@@ -84,10 +84,9 @@ namespace System.Collections.Concurrent
// that generate collisions. Whenever a GrowTable() should be the only place that changes this
#if !FEATURE_CORECLR
// The field should be have been marked as NonSerialized but because we shipped it without that attribute in 4.5.1.
- // we have to also add the OptionalField attribute to prevent cases where the field was serialized and we try to deserialize it after the fix.
- // See DevDiv:899074 for more information
+ // we can't add it back without breaking compat. To maximize compat we are going to keep the OptionalField attribute
+ // This will prevent cases where the field was not serialized.
[OptionalField]
- [NonSerialized]
#endif
private int m_keyRehashCount;
diff --git a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs
index 6490a3a2dd..461ed76a0f 100644
--- a/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs
+++ b/src/mscorlib/src/System/Diagnostics/Eventing/TraceLogging/SimpleTypeInfos.cs
@@ -656,7 +656,7 @@ namespace System.Diagnostics.Tracing
public override object GetData(object value)
{
- return (Byte)value;
+ return value;
}
}
@@ -678,7 +678,7 @@ namespace System.Diagnostics.Tracing
public override object GetData(object value)
{
- return (SByte)value;
+ return value;
}
}
@@ -700,7 +700,7 @@ namespace System.Diagnostics.Tracing
public override object GetData(object value)
{
- return (Int16)value;
+ return value;
}
}
@@ -722,7 +722,7 @@ namespace System.Diagnostics.Tracing
public override object GetData(object value)
{
- return (UInt16)value;
+ return value;
}
}
@@ -744,7 +744,7 @@ namespace System.Diagnostics.Tracing
public override object GetData(object value)
{
- return (Int32)value;
+ return value;
}
}
@@ -766,7 +766,7 @@ namespace System.Diagnostics.Tracing
public override object GetData(object value)
{
- return (UInt32)value;
+ return value;
}
}
@@ -788,7 +788,7 @@ namespace System.Diagnostics.Tracing
public override object GetData(object value)
{
- return (Int64)value;
+ return value;
}
}
@@ -810,7 +810,7 @@ namespace System.Diagnostics.Tracing
public override object GetData(object value)
{
- return (UInt64)value;
+ return value;
}
}
diff --git a/src/mscorlib/src/System/Globalization/CultureData.cs b/src/mscorlib/src/System/Globalization/CultureData.cs
index 9911ef35cf..fa8926bfaf 100644
--- a/src/mscorlib/src/System/Globalization/CultureData.cs
+++ b/src/mscorlib/src/System/Globalization/CultureData.cs
@@ -1180,24 +1180,10 @@ namespace System.Globalization
if (this.sLocalizedDisplayName == null)
{
#if !FEATURE_CORECLR
- if (this.IsSupplementalCustomCulture)
+ String resourceKey = "Globalization.ci_" + this.sName;
+ if (IsResourcePresent(resourceKey))
{
- if (this.IsNeutralCulture)
- {
- this.sLocalizedDisplayName = this.SNATIVELANGUAGE;
- }
- else
- {
- this.sLocalizedDisplayName = this.SNATIVEDISPLAYNAME;
- }
- }
- else
- {
- String resourceKey = "Globalization.ci_" + this.sName;
- if (IsResourcePresent(resourceKey))
- {
- this.sLocalizedDisplayName = Environment.GetResourceString(resourceKey);
- }
+ this.sLocalizedDisplayName = Environment.GetResourceString(resourceKey);
}
#endif
// If it hasn't been found (Windows 8 and up), fallback to the system
@@ -1518,17 +1504,10 @@ namespace System.Globalization
if (this.sLocalizedCountry == null)
{
#if !FEATURE_CORECLR
- if (this.IsSupplementalCustomCulture)
- {
- this.sLocalizedCountry = SNATIVECOUNTRY;
- }
- else
+ String resourceKey = "Globalization.ri_" + this.SREGIONNAME;
+ if (IsResourcePresent(resourceKey))
{
- String resourceKey = "Globalization.ri_" + this.SREGIONNAME;
- if (IsResourcePresent(resourceKey))
- {
- this.sLocalizedCountry = Environment.GetResourceString(resourceKey);
- }
+ this.sLocalizedCountry = Environment.GetResourceString(resourceKey);
}
#endif
// If it hasn't been found (Windows 8 and up), fallback to the system
diff --git a/src/mscorlib/src/mscorlib.txt b/src/mscorlib/src/mscorlib.txt
index 6435d1c85d..2298b2065d 100644
--- a/src/mscorlib/src/mscorlib.txt
+++ b/src/mscorlib/src/mscorlib.txt
@@ -2647,7 +2647,7 @@ Globalization.ci_ewo = Ewondo
Globalization.ci_ewo-CM = Ewondo (Cameroon)
Globalization.ci_fa = Persian
Globalization.ci_fa-AF = Persian (Afghanistan)
-Globalization.ci_fa-IR = Persian
+Globalization.ci_fa-IR = Persian (Iran)
Globalization.ci_ff = Fulah
Globalization.ci_ff-CM = Fulah (Cameroon)
Globalization.ci_ff-GN = Fulah (Guinea)
diff --git a/src/pal/prebuilt/idl/clrinternal_i.c b/src/pal/prebuilt/idl/clrinternal_i.c
index 32f23c5d7b..c2e8ba5cb5 100644
--- a/src/pal/prebuilt/idl/clrinternal_i.c
+++ b/src/pal/prebuilt/idl/clrinternal_i.c
@@ -70,18 +70,6 @@ MIDL_DEFINE_GUID(IID, IID_IEEMemoryManager,0x17713B61,0xB59F,0x4e13,0xBA,0xAF,0x
MIDL_DEFINE_GUID(IID, IID_IPrivateManagedExceptionReporting,0xAD76A023,0x332D,0x4298,0x80,0x01,0x07,0xAA,0x93,0x50,0xDC,0xA4);
-
-MIDL_DEFINE_GUID(IID, IID_ICLRRuntimeHostInternal,0x07C4E752,0x3CBA,0x4A07,0x99,0x43,0xB5,0xF2,0x06,0x38,0x21,0x78);
-
-
-MIDL_DEFINE_GUID(IID, IID_ICLRShimControlInternal,0x826AAAD7,0x717B,0x44f8,0x9B,0xB0,0x7D,0xAC,0x36,0x8B,0x85,0xA5);
-
-
-MIDL_DEFINE_GUID(IID, IID_ICLRActivationFactory,0x331F2F6C,0x385F,0x462c,0x91,0x25,0x81,0x67,0x12,0xFB,0x2B,0xC6);
-
-
-MIDL_DEFINE_GUID(IID, IID_ICLRActivationFactory2,0x035049E5,0x2658,0x40C0,0x92,0x69,0x21,0xC4,0x8D,0x8F,0x07,0x48);
-
#undef MIDL_DEFINE_GUID
#ifdef __cplusplus
diff --git a/src/pal/prebuilt/idl/corprof_i.c b/src/pal/prebuilt/idl/corprof_i.c
index f01943f92f..45584f3805 100644
--- a/src/pal/prebuilt/idl/corprof_i.c
+++ b/src/pal/prebuilt/idl/corprof_i.c
@@ -78,6 +78,9 @@ MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback5,0x8DFBA405,0x8C9F,0x45F8,0xBF,0x
MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback6,0xFC13DF4B,0x4448,0x4F4F,0x95,0x0C,0xBA,0x8D,0x19,0xD0,0x0C,0x36);
+MIDL_DEFINE_GUID(IID, IID_ICorProfilerCallback7,0xF76A2DBA,0x1D52,0x4539,0x86,0x6C,0x2A,0xA5,0x18,0xF9,0xEF,0xC3);
+
+
MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo,0x28B5557D,0x3F3F,0x48b4,0x90,0xB2,0x5F,0x9E,0xEA,0x2F,0x6C,0x48);
@@ -111,6 +114,9 @@ MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo5,0x07602928,0xCE38,0x4B83,0x81,0xE7,0
MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo6,0xF30A070D,0xBFFB,0x46A7,0xB1,0xD8,0x87,0x81,0xEF,0x7B,0x69,0x8A);
+MIDL_DEFINE_GUID(IID, IID_ICorProfilerInfo7,0x9AEECC0D,0x63E0,0x4187,0x8C,0x00,0xE3,0x12,0xF5,0x03,0xF6,0x63);
+
+
MIDL_DEFINE_GUID(IID, IID_ICorProfilerMethodEnum,0xFCCEE788,0x0088,0x454B,0xA8,0x11,0xC9,0x9F,0x29,0x8D,0x19,0x42);
diff --git a/src/pal/prebuilt/inc/clretwall.h b/src/pal/prebuilt/inc/clretwall.h
index a3df060dc5..cf28b49f44 100644
--- a/src/pal/prebuilt/inc/clretwall.h
+++ b/src/pal/prebuilt/inc/clretwall.h
@@ -218,7 +218,7 @@ Remarks:
#endif
#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION
//+
-// Provider Microsoft-Windows-DotNETRuntime Event Count 167
+// Provider Microsoft-Windows-DotNETRuntime Event Count 168
//+
EXTERN_C __declspec(selectany) const GUID MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER = {0xe13c0d23, 0xccbc, 0x4e12, {0x93, 0x1b, 0xd9, 0xcc, 0x2e, 0xee, 0x27, 0xe4}};
@@ -744,6 +744,8 @@ EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR DebugExceptionProcessingEn
#define DebugExceptionProcessingEnd_value 0xf3
EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR CodeSymbols = {0x104, 0x0, 0x0, 0x5, 0x1, 0x1e, 0x400000000};
#define CodeSymbols_value 0x104
+EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR EventSource = {0x10e, 0x0, 0x0, 0x4, 0x1, 0x0, 0x0};
+#define EventSource_value 0x10e
//
// Note on Generate Code from Manifest Windows Vista and above
@@ -3210,6 +3212,20 @@ Remarks:
CoTemplate_xhhqbh(Microsoft_Windows_DotNETRuntimeHandle, &CodeSymbols, ModuleId, TotalChunks, ChunkNumber, ChunkLength, Chunk, ClrInstanceID)\
: ERROR_SUCCESS\
+//
+// Enablement check macro for EventSource
+//
+
+#define EventEnabledEventSource() ((Microsoft_Windows_DotNETRuntimeEnableBits[0] & 0x00002000) != 0)
+
+//
+// Event Macro for EventSource
+//
+#define FireEtwEventSource(EventID, EventName, EventSourceName, Payload)\
+ EventEnabledEventSource() ?\
+ CoTemplate_dzzz(Microsoft_Windows_DotNETRuntimeHandle, &EventSource, EventID, EventName, EventSourceName, Payload)\
+ : ERROR_SUCCESS\
+
#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION
//+
@@ -10977,6 +10993,54 @@ MCGEN_CALLOUT(RegHandle,
#endif
//
+//Template from manifest : EventSource
+//
+#ifndef CoTemplate_dzzz_def
+#define CoTemplate_dzzz_def
+ETW_INLINE
+ULONG
+CoTemplate_dzzz(
+ _In_ REGHANDLE RegHandle,
+ _In_ PCEVENT_DESCRIPTOR Descriptor,
+ _In_ const signed int _Arg0,
+ _In_opt_ PCWSTR _Arg1,
+ _In_opt_ PCWSTR _Arg2,
+ _In_opt_ PCWSTR _Arg3
+ )
+{
+#define ARGUMENT_COUNT_dzzz 4
+ ULONG Error = ERROR_SUCCESS;
+
+ EVENT_DATA_DESCRIPTOR EventData[ARGUMENT_COUNT_dzzz];
+
+ EventDataDescCreate(&EventData[0], &_Arg0, sizeof(const signed int) );
+
+ EventDataDescCreate(&EventData[1],
+ (_Arg1 != NULL) ? _Arg1 : L"NULL",
+ (_Arg1 != NULL) ? (ULONG)((wcslen(_Arg1) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"NULL"));
+
+ EventDataDescCreate(&EventData[2],
+ (_Arg2 != NULL) ? _Arg2 : L"NULL",
+ (_Arg2 != NULL) ? (ULONG)((wcslen(_Arg2) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"NULL"));
+
+ EventDataDescCreate(&EventData[3],
+ (_Arg3 != NULL) ? _Arg3 : L"NULL",
+ (_Arg3 != NULL) ? (ULONG)((wcslen(_Arg3) + 1) * sizeof(WCHAR)) : (ULONG)sizeof(L"NULL"));
+
+ Error = EventWrite(RegHandle, Descriptor, ARGUMENT_COUNT_dzzz, EventData);
+
+#ifdef MCGEN_CALLOUT
+MCGEN_CALLOUT(RegHandle,
+ Descriptor,
+ ARGUMENT_COUNT_dzzz,
+ EventData);
+#endif
+
+ return Error;
+}
+#endif
+
+//
//Template from manifest : StressLog
//
#ifndef CoTemplate_qcs_def
diff --git a/src/pal/prebuilt/inc/clretwallmain.h b/src/pal/prebuilt/inc/clretwallmain.h
index a17e84cd15..7f9ff40806 100644
--- a/src/pal/prebuilt/inc/clretwallmain.h
+++ b/src/pal/prebuilt/inc/clretwallmain.h
@@ -9,7 +9,7 @@
#define MAX_BYTES_PER_ETW_PROVIDER 64
EXTERN_C __declspec(selectany) const BYTE etwStackSupportedEvents[NO_OF_ETW_PROVIDERS][MAX_BYTES_PER_ETW_PROVIDER] =
{
- {0, 4, 16, 192, 9, 255, 3, 241, 195, 0, 251, 3, 0, 0, 0, 0, 128, 31, 226, 63, 0, 0, 64, 65, 0, 43, 0, 0, 0, 0, 15, 252, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ {0, 4, 16, 192, 9, 255, 3, 241, 195, 0, 251, 3, 0, 0, 0, 0, 128, 31, 226, 63, 0, 0, 64, 65, 0, 43, 0, 0, 0, 0, 15, 252, 17, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
diff --git a/src/pal/prebuilt/inc/corerror.h b/src/pal/prebuilt/inc/corerror.h
index aaa3d6f0d3..5c76d20aef 100644
--- a/src/pal/prebuilt/inc/corerror.h
+++ b/src/pal/prebuilt/inc/corerror.h
@@ -633,6 +633,7 @@
#define CORPROF_E_FUNCTION_IS_COLLECTIBLE EMAKEHR(0x137e)
#define CORPROF_E_REJIT_REQUIRES_DISABLE_NGEN EMAKEHR(0x137f)
#define CORPROF_E_CALLBACK6_REQUIRED EMAKEHR(0x1380)
+#define CORPROF_E_CALLBACK7_REQUIRED EMAKEHR(0x1382)
#define SECURITY_E_XML_TO_ASN_ENCODING EMAKEHR(0x1400)
#define SECURITY_E_INCOMPATIBLE_SHARE EMAKEHR(0x1401)
#define SECURITY_E_UNVERIFIABLE EMAKEHR(0x1402)
diff --git a/src/pal/prebuilt/inc/corprof.h b/src/pal/prebuilt/inc/corprof.h
index 6043b4f395..8dee68d22d 100644
--- a/src/pal/prebuilt/inc/corprof.h
+++ b/src/pal/prebuilt/inc/corprof.h
@@ -80,6 +80,13 @@ typedef interface ICorProfilerCallback6 ICorProfilerCallback6;
#endif /* __ICorProfilerCallback6_FWD_DEFINED__ */
+#ifndef __ICorProfilerCallback7_FWD_DEFINED__
+#define __ICorProfilerCallback7_FWD_DEFINED__
+typedef interface ICorProfilerCallback7 ICorProfilerCallback7;
+
+#endif /* __ICorProfilerCallback7_FWD_DEFINED__ */
+
+
#ifndef __ICorProfilerInfo_FWD_DEFINED__
#define __ICorProfilerInfo_FWD_DEFINED__
typedef interface ICorProfilerInfo ICorProfilerInfo;
@@ -157,6 +164,13 @@ typedef interface ICorProfilerInfo6 ICorProfilerInfo6;
#endif /* __ICorProfilerInfo6_FWD_DEFINED__ */
+#ifndef __ICorProfilerInfo7_FWD_DEFINED__
+#define __ICorProfilerInfo7_FWD_DEFINED__
+typedef interface ICorProfilerInfo7 ICorProfilerInfo7;
+
+#endif /* __ICorProfilerInfo7_FWD_DEFINED__ */
+
+
#ifndef __ICorProfilerMethodEnum_FWD_DEFINED__
#define __ICorProfilerMethodEnum_FWD_DEFINED__
typedef interface ICorProfilerMethodEnum ICorProfilerMethodEnum;
@@ -463,7 +477,7 @@ enum __MIDL___MIDL_itf_corprof_0000_0000_0005
COR_PRF_DISABLE_ALL_NGEN_IMAGES = 0x80000000,
COR_PRF_ALL = 0x8fffffff,
COR_PRF_REQUIRE_PROFILE_IMAGE = ( ( COR_PRF_USE_PROFILE_IMAGES | COR_PRF_MONITOR_CODE_TRANSITIONS ) | COR_PRF_MONITOR_ENTERLEAVE ) ,
- COR_PRF_ALLOWABLE_AFTER_ATTACH = ( ( ( ( ( ( ( ( COR_PRF_MONITOR_THREADS | COR_PRF_MONITOR_MODULE_LOADS ) | COR_PRF_MONITOR_ASSEMBLY_LOADS ) | COR_PRF_MONITOR_APPDOMAIN_LOADS ) | COR_PRF_ENABLE_STACK_SNAPSHOT ) | COR_PRF_MONITOR_GC ) | COR_PRF_MONITOR_SUSPENDS ) | COR_PRF_MONITOR_CLASS_LOADS ) | COR_PRF_MONITOR_JIT_COMPILATION ) ,
+ COR_PRF_ALLOWABLE_AFTER_ATTACH = ( ( ( ( ( ( ( ( ( COR_PRF_MONITOR_THREADS | COR_PRF_MONITOR_MODULE_LOADS ) | COR_PRF_MONITOR_ASSEMBLY_LOADS ) | COR_PRF_MONITOR_APPDOMAIN_LOADS ) | COR_PRF_ENABLE_STACK_SNAPSHOT ) | COR_PRF_MONITOR_GC ) | COR_PRF_MONITOR_SUSPENDS ) | COR_PRF_MONITOR_CLASS_LOADS ) | COR_PRF_MONITOR_EXCEPTIONS ) | COR_PRF_MONITOR_JIT_COMPILATION ) ,
COR_PRF_MONITOR_IMMUTABLE = ( ( ( ( ( ( ( ( ( ( ( ( ( ( ( COR_PRF_MONITOR_CODE_TRANSITIONS | COR_PRF_MONITOR_REMOTING ) | COR_PRF_MONITOR_REMOTING_COOKIE ) | COR_PRF_MONITOR_REMOTING_ASYNC ) | COR_PRF_ENABLE_REJIT ) | COR_PRF_ENABLE_INPROC_DEBUGGING ) | COR_PRF_ENABLE_JIT_MAPS ) | COR_PRF_DISABLE_OPTIMIZATIONS ) | COR_PRF_DISABLE_INLINING ) | COR_PRF_ENABLE_OBJECT_ALLOCATED ) | COR_PRF_ENABLE_FUNCTION_ARGS ) | COR_PRF_ENABLE_FUNCTION_RETVAL ) | COR_PRF_ENABLE_FRAME_INFO ) | COR_PRF_USE_PROFILE_IMAGES ) | COR_PRF_DISABLE_TRANSPARENCY_CHECKS_UNDER_FULL_TRUST ) | COR_PRF_DISABLE_ALL_NGEN_IMAGES )
} COR_PRF_MONITOR;
@@ -472,8 +486,9 @@ enum __MIDL___MIDL_itf_corprof_0000_0000_0006
{
COR_PRF_HIGH_MONITOR_NONE = 0,
COR_PRF_HIGH_ADD_ASSEMBLY_REFERENCES = 0x1,
+ COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED = 0x2,
COR_PRF_HIGH_REQUIRE_PROFILE_IMAGE = 0,
- COR_PRF_HIGH_ALLOWABLE_AFTER_ATTACH = 0,
+ COR_PRF_HIGH_ALLOWABLE_AFTER_ATTACH = COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED,
COR_PRF_HIGH_MONITOR_IMMUTABLE = 0
} COR_PRF_HIGH_MONITOR;
@@ -5037,11 +5052,767 @@ EXTERN_C const IID IID_ICorProfilerCallback6;
#endif /* __ICorProfilerCallback6_INTERFACE_DEFINED__ */
-/* interface __MIDL_itf_corprof_0000_0006 */
+#ifndef __ICorProfilerCallback7_INTERFACE_DEFINED__
+#define __ICorProfilerCallback7_INTERFACE_DEFINED__
+
+/* interface ICorProfilerCallback7 */
+/* [local][unique][uuid][object] */
+
+
+EXTERN_C const IID IID_ICorProfilerCallback7;
+
+#if defined(__cplusplus) && !defined(CINTERFACE)
+
+ MIDL_INTERFACE("F76A2DBA-1D52-4539-866C-2AA518F9EFC3")
+ ICorProfilerCallback7 : public ICorProfilerCallback6
+ {
+ public:
+ virtual HRESULT STDMETHODCALLTYPE ModuleInMemorySymbolsUpdated(
+ ModuleID moduleId) = 0;
+
+ };
+
+
+#else /* C style interface */
+
+ typedef struct ICorProfilerCallback7Vtbl
+ {
+ BEGIN_INTERFACE
+
+ HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ REFIID riid,
+ /* [annotation][iid_is][out] */
+ _COM_Outptr_ void **ppvObject);
+
+ ULONG ( STDMETHODCALLTYPE *AddRef )(
+ ICorProfilerCallback7 * This);
+
+ ULONG ( STDMETHODCALLTYPE *Release )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *Initialize )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ IUnknown *pICorProfilerInfoUnk);
+
+ HRESULT ( STDMETHODCALLTYPE *Shutdown )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *AppDomainCreationStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ AppDomainID appDomainId);
+
+ HRESULT ( STDMETHODCALLTYPE *AppDomainCreationFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ AppDomainID appDomainId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ AppDomainID appDomainId);
+
+ HRESULT ( STDMETHODCALLTYPE *AppDomainShutdownFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ AppDomainID appDomainId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *AssemblyLoadStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ AssemblyID assemblyId);
+
+ HRESULT ( STDMETHODCALLTYPE *AssemblyLoadFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ AssemblyID assemblyId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ AssemblyID assemblyId);
+
+ HRESULT ( STDMETHODCALLTYPE *AssemblyUnloadFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ AssemblyID assemblyId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleLoadStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ModuleID moduleId);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleLoadFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleUnloadStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ModuleID moduleId);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleUnloadFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleAttachedToAssembly )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ AssemblyID AssemblyId);
+
+ HRESULT ( STDMETHODCALLTYPE *ClassLoadStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ClassID classId);
+
+ HRESULT ( STDMETHODCALLTYPE *ClassLoadFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *ClassUnloadStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ClassID classId);
+
+ HRESULT ( STDMETHODCALLTYPE *ClassUnloadFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *FunctionUnloadStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *JITCompilationStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *JITCompilationFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ HRESULT hrStatus,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [out] */ BOOL *pbUseCachedFunction);
+
+ HRESULT ( STDMETHODCALLTYPE *JITCachedFunctionSearchFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_JIT_CACHE result);
+
+ HRESULT ( STDMETHODCALLTYPE *JITFunctionPitched )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *JITInlining )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID callerId,
+ /* [in] */ FunctionID calleeId,
+ /* [out] */ BOOL *pfShouldInline);
+
+ HRESULT ( STDMETHODCALLTYPE *ThreadCreated )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ThreadID threadId);
+
+ HRESULT ( STDMETHODCALLTYPE *ThreadDestroyed )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ThreadID threadId);
+
+ HRESULT ( STDMETHODCALLTYPE *ThreadAssignedToOSThread )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ThreadID managedThreadId,
+ /* [in] */ DWORD osThreadId);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationStarted )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingClientSendingMessage )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ GUID *pCookie,
+ /* [in] */ BOOL fIsAsync);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingClientReceivingReply )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ GUID *pCookie,
+ /* [in] */ BOOL fIsAsync);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingClientInvocationFinished )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingServerReceivingMessage )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ GUID *pCookie,
+ /* [in] */ BOOL fIsAsync);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationStarted )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingServerInvocationReturned )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RemotingServerSendingReply )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ GUID *pCookie,
+ /* [in] */ BOOL fIsAsync);
+
+ HRESULT ( STDMETHODCALLTYPE *UnmanagedToManagedTransition )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_TRANSITION_REASON reason);
+
+ HRESULT ( STDMETHODCALLTYPE *ManagedToUnmanagedTransition )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_TRANSITION_REASON reason);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ COR_PRF_SUSPEND_REASON suspendReason);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendFinished )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeSuspendAborted )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeResumeStarted )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeResumeFinished )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeThreadSuspended )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ThreadID threadId);
+
+ HRESULT ( STDMETHODCALLTYPE *RuntimeThreadResumed )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ThreadID threadId);
+
+ HRESULT ( STDMETHODCALLTYPE *MovedReferences )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ULONG cMovedObjectIDRanges,
+ /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ],
+ /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ],
+ /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *ObjectAllocated )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ObjectID objectId,
+ /* [in] */ ClassID classId);
+
+ HRESULT ( STDMETHODCALLTYPE *ObjectsAllocatedByClass )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ULONG cClassCount,
+ /* [size_is][in] */ ClassID classIds[ ],
+ /* [size_is][in] */ ULONG cObjects[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *ObjectReferences )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ObjectID objectId,
+ /* [in] */ ClassID classId,
+ /* [in] */ ULONG cObjectRefs,
+ /* [size_is][in] */ ObjectID objectRefIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *RootReferences )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ULONG cRootRefs,
+ /* [size_is][in] */ ObjectID rootRefIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionThrown )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ObjectID thrownObjectId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionEnter )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFunctionLeave )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterEnter )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchFilterLeave )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionSearchCatcherFound )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerEnter )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ UINT_PTR __unused);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionOSHandlerLeave )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ UINT_PTR __unused);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionEnter )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFunctionLeave )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyEnter )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionUnwindFinallyLeave )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherEnter )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ObjectID objectId);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionCatcherLeave )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *COMClassicVTableCreated )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ClassID wrappedClassId,
+ /* [in] */ REFGUID implementedIID,
+ /* [in] */ void *pVTable,
+ /* [in] */ ULONG cSlots);
+
+ HRESULT ( STDMETHODCALLTYPE *COMClassicVTableDestroyed )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ClassID wrappedClassId,
+ /* [in] */ REFGUID implementedIID,
+ /* [in] */ void *pVTable);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherFound )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ExceptionCLRCatcherExecute )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ThreadNameChanged )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ThreadID threadId,
+ /* [in] */ ULONG cchName,
+ /* [annotation][in] */
+ _In_reads_opt_(cchName) WCHAR name[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GarbageCollectionStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ int cGenerations,
+ /* [size_is][in] */ BOOL generationCollected[ ],
+ /* [in] */ COR_PRF_GC_REASON reason);
+
+ HRESULT ( STDMETHODCALLTYPE *SurvivingReferences )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ULONG cSurvivingObjectIDRanges,
+ /* [size_is][in] */ ObjectID objectIDRangeStart[ ],
+ /* [size_is][in] */ ULONG cObjectIDRangeLength[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GarbageCollectionFinished )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *FinalizeableObjectQueued )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ DWORD finalizerFlags,
+ /* [in] */ ObjectID objectID);
+
+ HRESULT ( STDMETHODCALLTYPE *RootReferences2 )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ULONG cRootRefs,
+ /* [size_is][in] */ ObjectID rootRefIds[ ],
+ /* [size_is][in] */ COR_PRF_GC_ROOT_KIND rootKinds[ ],
+ /* [size_is][in] */ COR_PRF_GC_ROOT_FLAGS rootFlags[ ],
+ /* [size_is][in] */ UINT_PTR rootIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *HandleCreated )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ GCHandleID handleId,
+ /* [in] */ ObjectID initialObjectId);
+
+ HRESULT ( STDMETHODCALLTYPE *HandleDestroyed )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ GCHandleID handleId);
+
+ HRESULT ( STDMETHODCALLTYPE *InitializeForAttach )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ IUnknown *pCorProfilerInfoUnk,
+ /* [in] */ void *pvClientData,
+ /* [in] */ UINT cbClientData);
+
+ HRESULT ( STDMETHODCALLTYPE *ProfilerAttachComplete )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ProfilerDetachSucceeded )(
+ ICorProfilerCallback7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *ReJITCompilationStarted )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID rejitId,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *GetReJITParameters )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [in] */ ICorProfilerFunctionControl *pFunctionControl);
+
+ HRESULT ( STDMETHODCALLTYPE *ReJITCompilationFinished )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID rejitId,
+ /* [in] */ HRESULT hrStatus,
+ /* [in] */ BOOL fIsSafeToBlock);
+
+ HRESULT ( STDMETHODCALLTYPE *ReJITError )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ HRESULT hrStatus);
+
+ HRESULT ( STDMETHODCALLTYPE *MovedReferences2 )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ULONG cMovedObjectIDRanges,
+ /* [size_is][in] */ ObjectID oldObjectIDRangeStart[ ],
+ /* [size_is][in] */ ObjectID newObjectIDRangeStart[ ],
+ /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *SurvivingReferences2 )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ULONG cSurvivingObjectIDRanges,
+ /* [size_is][in] */ ObjectID objectIDRangeStart[ ],
+ /* [size_is][in] */ SIZE_T cObjectIDRangeLength[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *ConditionalWeakTableElementReferences )(
+ ICorProfilerCallback7 * This,
+ /* [in] */ ULONG cRootRefs,
+ /* [size_is][in] */ ObjectID keyRefIds[ ],
+ /* [size_is][in] */ ObjectID valueRefIds[ ],
+ /* [size_is][in] */ GCHandleID rootIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetAssemblyReferences )(
+ ICorProfilerCallback7 * This,
+ /* [string][in] */ const WCHAR *wszAssemblyPath,
+ /* [in] */ ICorProfilerAssemblyReferenceProvider *pAsmRefProvider);
+
+ HRESULT ( STDMETHODCALLTYPE *ModuleInMemorySymbolsUpdated )(
+ ICorProfilerCallback7 * This,
+ ModuleID moduleId);
+
+ END_INTERFACE
+ } ICorProfilerCallback7Vtbl;
+
+ interface ICorProfilerCallback7
+ {
+ CONST_VTBL struct ICorProfilerCallback7Vtbl *lpVtbl;
+ };
+
+
+
+#ifdef COBJMACROS
+
+
+#define ICorProfilerCallback7_QueryInterface(This,riid,ppvObject) \
+ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
+
+#define ICorProfilerCallback7_AddRef(This) \
+ ( (This)->lpVtbl -> AddRef(This) )
+
+#define ICorProfilerCallback7_Release(This) \
+ ( (This)->lpVtbl -> Release(This) )
+
+
+#define ICorProfilerCallback7_Initialize(This,pICorProfilerInfoUnk) \
+ ( (This)->lpVtbl -> Initialize(This,pICorProfilerInfoUnk) )
+
+#define ICorProfilerCallback7_Shutdown(This) \
+ ( (This)->lpVtbl -> Shutdown(This) )
+
+#define ICorProfilerCallback7_AppDomainCreationStarted(This,appDomainId) \
+ ( (This)->lpVtbl -> AppDomainCreationStarted(This,appDomainId) )
+
+#define ICorProfilerCallback7_AppDomainCreationFinished(This,appDomainId,hrStatus) \
+ ( (This)->lpVtbl -> AppDomainCreationFinished(This,appDomainId,hrStatus) )
+
+#define ICorProfilerCallback7_AppDomainShutdownStarted(This,appDomainId) \
+ ( (This)->lpVtbl -> AppDomainShutdownStarted(This,appDomainId) )
+
+#define ICorProfilerCallback7_AppDomainShutdownFinished(This,appDomainId,hrStatus) \
+ ( (This)->lpVtbl -> AppDomainShutdownFinished(This,appDomainId,hrStatus) )
+
+#define ICorProfilerCallback7_AssemblyLoadStarted(This,assemblyId) \
+ ( (This)->lpVtbl -> AssemblyLoadStarted(This,assemblyId) )
+
+#define ICorProfilerCallback7_AssemblyLoadFinished(This,assemblyId,hrStatus) \
+ ( (This)->lpVtbl -> AssemblyLoadFinished(This,assemblyId,hrStatus) )
+
+#define ICorProfilerCallback7_AssemblyUnloadStarted(This,assemblyId) \
+ ( (This)->lpVtbl -> AssemblyUnloadStarted(This,assemblyId) )
+
+#define ICorProfilerCallback7_AssemblyUnloadFinished(This,assemblyId,hrStatus) \
+ ( (This)->lpVtbl -> AssemblyUnloadFinished(This,assemblyId,hrStatus) )
+
+#define ICorProfilerCallback7_ModuleLoadStarted(This,moduleId) \
+ ( (This)->lpVtbl -> ModuleLoadStarted(This,moduleId) )
+
+#define ICorProfilerCallback7_ModuleLoadFinished(This,moduleId,hrStatus) \
+ ( (This)->lpVtbl -> ModuleLoadFinished(This,moduleId,hrStatus) )
+
+#define ICorProfilerCallback7_ModuleUnloadStarted(This,moduleId) \
+ ( (This)->lpVtbl -> ModuleUnloadStarted(This,moduleId) )
+
+#define ICorProfilerCallback7_ModuleUnloadFinished(This,moduleId,hrStatus) \
+ ( (This)->lpVtbl -> ModuleUnloadFinished(This,moduleId,hrStatus) )
+
+#define ICorProfilerCallback7_ModuleAttachedToAssembly(This,moduleId,AssemblyId) \
+ ( (This)->lpVtbl -> ModuleAttachedToAssembly(This,moduleId,AssemblyId) )
+
+#define ICorProfilerCallback7_ClassLoadStarted(This,classId) \
+ ( (This)->lpVtbl -> ClassLoadStarted(This,classId) )
+
+#define ICorProfilerCallback7_ClassLoadFinished(This,classId,hrStatus) \
+ ( (This)->lpVtbl -> ClassLoadFinished(This,classId,hrStatus) )
+
+#define ICorProfilerCallback7_ClassUnloadStarted(This,classId) \
+ ( (This)->lpVtbl -> ClassUnloadStarted(This,classId) )
+
+#define ICorProfilerCallback7_ClassUnloadFinished(This,classId,hrStatus) \
+ ( (This)->lpVtbl -> ClassUnloadFinished(This,classId,hrStatus) )
+
+#define ICorProfilerCallback7_FunctionUnloadStarted(This,functionId) \
+ ( (This)->lpVtbl -> FunctionUnloadStarted(This,functionId) )
+
+#define ICorProfilerCallback7_JITCompilationStarted(This,functionId,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> JITCompilationStarted(This,functionId,fIsSafeToBlock) )
+
+#define ICorProfilerCallback7_JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> JITCompilationFinished(This,functionId,hrStatus,fIsSafeToBlock) )
+
+#define ICorProfilerCallback7_JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) \
+ ( (This)->lpVtbl -> JITCachedFunctionSearchStarted(This,functionId,pbUseCachedFunction) )
+
+#define ICorProfilerCallback7_JITCachedFunctionSearchFinished(This,functionId,result) \
+ ( (This)->lpVtbl -> JITCachedFunctionSearchFinished(This,functionId,result) )
+
+#define ICorProfilerCallback7_JITFunctionPitched(This,functionId) \
+ ( (This)->lpVtbl -> JITFunctionPitched(This,functionId) )
+
+#define ICorProfilerCallback7_JITInlining(This,callerId,calleeId,pfShouldInline) \
+ ( (This)->lpVtbl -> JITInlining(This,callerId,calleeId,pfShouldInline) )
+
+#define ICorProfilerCallback7_ThreadCreated(This,threadId) \
+ ( (This)->lpVtbl -> ThreadCreated(This,threadId) )
+
+#define ICorProfilerCallback7_ThreadDestroyed(This,threadId) \
+ ( (This)->lpVtbl -> ThreadDestroyed(This,threadId) )
+
+#define ICorProfilerCallback7_ThreadAssignedToOSThread(This,managedThreadId,osThreadId) \
+ ( (This)->lpVtbl -> ThreadAssignedToOSThread(This,managedThreadId,osThreadId) )
+
+#define ICorProfilerCallback7_RemotingClientInvocationStarted(This) \
+ ( (This)->lpVtbl -> RemotingClientInvocationStarted(This) )
+
+#define ICorProfilerCallback7_RemotingClientSendingMessage(This,pCookie,fIsAsync) \
+ ( (This)->lpVtbl -> RemotingClientSendingMessage(This,pCookie,fIsAsync) )
+
+#define ICorProfilerCallback7_RemotingClientReceivingReply(This,pCookie,fIsAsync) \
+ ( (This)->lpVtbl -> RemotingClientReceivingReply(This,pCookie,fIsAsync) )
+
+#define ICorProfilerCallback7_RemotingClientInvocationFinished(This) \
+ ( (This)->lpVtbl -> RemotingClientInvocationFinished(This) )
+
+#define ICorProfilerCallback7_RemotingServerReceivingMessage(This,pCookie,fIsAsync) \
+ ( (This)->lpVtbl -> RemotingServerReceivingMessage(This,pCookie,fIsAsync) )
+
+#define ICorProfilerCallback7_RemotingServerInvocationStarted(This) \
+ ( (This)->lpVtbl -> RemotingServerInvocationStarted(This) )
+
+#define ICorProfilerCallback7_RemotingServerInvocationReturned(This) \
+ ( (This)->lpVtbl -> RemotingServerInvocationReturned(This) )
+
+#define ICorProfilerCallback7_RemotingServerSendingReply(This,pCookie,fIsAsync) \
+ ( (This)->lpVtbl -> RemotingServerSendingReply(This,pCookie,fIsAsync) )
+
+#define ICorProfilerCallback7_UnmanagedToManagedTransition(This,functionId,reason) \
+ ( (This)->lpVtbl -> UnmanagedToManagedTransition(This,functionId,reason) )
+
+#define ICorProfilerCallback7_ManagedToUnmanagedTransition(This,functionId,reason) \
+ ( (This)->lpVtbl -> ManagedToUnmanagedTransition(This,functionId,reason) )
+
+#define ICorProfilerCallback7_RuntimeSuspendStarted(This,suspendReason) \
+ ( (This)->lpVtbl -> RuntimeSuspendStarted(This,suspendReason) )
+
+#define ICorProfilerCallback7_RuntimeSuspendFinished(This) \
+ ( (This)->lpVtbl -> RuntimeSuspendFinished(This) )
+
+#define ICorProfilerCallback7_RuntimeSuspendAborted(This) \
+ ( (This)->lpVtbl -> RuntimeSuspendAborted(This) )
+
+#define ICorProfilerCallback7_RuntimeResumeStarted(This) \
+ ( (This)->lpVtbl -> RuntimeResumeStarted(This) )
+
+#define ICorProfilerCallback7_RuntimeResumeFinished(This) \
+ ( (This)->lpVtbl -> RuntimeResumeFinished(This) )
+
+#define ICorProfilerCallback7_RuntimeThreadSuspended(This,threadId) \
+ ( (This)->lpVtbl -> RuntimeThreadSuspended(This,threadId) )
+
+#define ICorProfilerCallback7_RuntimeThreadResumed(This,threadId) \
+ ( (This)->lpVtbl -> RuntimeThreadResumed(This,threadId) )
+
+#define ICorProfilerCallback7_MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
+ ( (This)->lpVtbl -> MovedReferences(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
+
+#define ICorProfilerCallback7_ObjectAllocated(This,objectId,classId) \
+ ( (This)->lpVtbl -> ObjectAllocated(This,objectId,classId) )
+
+#define ICorProfilerCallback7_ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) \
+ ( (This)->lpVtbl -> ObjectsAllocatedByClass(This,cClassCount,classIds,cObjects) )
+
+#define ICorProfilerCallback7_ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) \
+ ( (This)->lpVtbl -> ObjectReferences(This,objectId,classId,cObjectRefs,objectRefIds) )
+
+#define ICorProfilerCallback7_RootReferences(This,cRootRefs,rootRefIds) \
+ ( (This)->lpVtbl -> RootReferences(This,cRootRefs,rootRefIds) )
+
+#define ICorProfilerCallback7_ExceptionThrown(This,thrownObjectId) \
+ ( (This)->lpVtbl -> ExceptionThrown(This,thrownObjectId) )
+
+#define ICorProfilerCallback7_ExceptionSearchFunctionEnter(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionSearchFunctionEnter(This,functionId) )
+
+#define ICorProfilerCallback7_ExceptionSearchFunctionLeave(This) \
+ ( (This)->lpVtbl -> ExceptionSearchFunctionLeave(This) )
+
+#define ICorProfilerCallback7_ExceptionSearchFilterEnter(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionSearchFilterEnter(This,functionId) )
+
+#define ICorProfilerCallback7_ExceptionSearchFilterLeave(This) \
+ ( (This)->lpVtbl -> ExceptionSearchFilterLeave(This) )
+
+#define ICorProfilerCallback7_ExceptionSearchCatcherFound(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionSearchCatcherFound(This,functionId) )
+
+#define ICorProfilerCallback7_ExceptionOSHandlerEnter(This,__unused) \
+ ( (This)->lpVtbl -> ExceptionOSHandlerEnter(This,__unused) )
+
+#define ICorProfilerCallback7_ExceptionOSHandlerLeave(This,__unused) \
+ ( (This)->lpVtbl -> ExceptionOSHandlerLeave(This,__unused) )
+
+#define ICorProfilerCallback7_ExceptionUnwindFunctionEnter(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionUnwindFunctionEnter(This,functionId) )
+
+#define ICorProfilerCallback7_ExceptionUnwindFunctionLeave(This) \
+ ( (This)->lpVtbl -> ExceptionUnwindFunctionLeave(This) )
+
+#define ICorProfilerCallback7_ExceptionUnwindFinallyEnter(This,functionId) \
+ ( (This)->lpVtbl -> ExceptionUnwindFinallyEnter(This,functionId) )
+
+#define ICorProfilerCallback7_ExceptionUnwindFinallyLeave(This) \
+ ( (This)->lpVtbl -> ExceptionUnwindFinallyLeave(This) )
+
+#define ICorProfilerCallback7_ExceptionCatcherEnter(This,functionId,objectId) \
+ ( (This)->lpVtbl -> ExceptionCatcherEnter(This,functionId,objectId) )
+
+#define ICorProfilerCallback7_ExceptionCatcherLeave(This) \
+ ( (This)->lpVtbl -> ExceptionCatcherLeave(This) )
+
+#define ICorProfilerCallback7_COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) \
+ ( (This)->lpVtbl -> COMClassicVTableCreated(This,wrappedClassId,implementedIID,pVTable,cSlots) )
+
+#define ICorProfilerCallback7_COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) \
+ ( (This)->lpVtbl -> COMClassicVTableDestroyed(This,wrappedClassId,implementedIID,pVTable) )
+
+#define ICorProfilerCallback7_ExceptionCLRCatcherFound(This) \
+ ( (This)->lpVtbl -> ExceptionCLRCatcherFound(This) )
+
+#define ICorProfilerCallback7_ExceptionCLRCatcherExecute(This) \
+ ( (This)->lpVtbl -> ExceptionCLRCatcherExecute(This) )
+
+
+#define ICorProfilerCallback7_ThreadNameChanged(This,threadId,cchName,name) \
+ ( (This)->lpVtbl -> ThreadNameChanged(This,threadId,cchName,name) )
+
+#define ICorProfilerCallback7_GarbageCollectionStarted(This,cGenerations,generationCollected,reason) \
+ ( (This)->lpVtbl -> GarbageCollectionStarted(This,cGenerations,generationCollected,reason) )
+
+#define ICorProfilerCallback7_SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
+ ( (This)->lpVtbl -> SurvivingReferences(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
+
+#define ICorProfilerCallback7_GarbageCollectionFinished(This) \
+ ( (This)->lpVtbl -> GarbageCollectionFinished(This) )
+
+#define ICorProfilerCallback7_FinalizeableObjectQueued(This,finalizerFlags,objectID) \
+ ( (This)->lpVtbl -> FinalizeableObjectQueued(This,finalizerFlags,objectID) )
+
+#define ICorProfilerCallback7_RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) \
+ ( (This)->lpVtbl -> RootReferences2(This,cRootRefs,rootRefIds,rootKinds,rootFlags,rootIds) )
+
+#define ICorProfilerCallback7_HandleCreated(This,handleId,initialObjectId) \
+ ( (This)->lpVtbl -> HandleCreated(This,handleId,initialObjectId) )
+
+#define ICorProfilerCallback7_HandleDestroyed(This,handleId) \
+ ( (This)->lpVtbl -> HandleDestroyed(This,handleId) )
+
+
+#define ICorProfilerCallback7_InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) \
+ ( (This)->lpVtbl -> InitializeForAttach(This,pCorProfilerInfoUnk,pvClientData,cbClientData) )
+
+#define ICorProfilerCallback7_ProfilerAttachComplete(This) \
+ ( (This)->lpVtbl -> ProfilerAttachComplete(This) )
+
+#define ICorProfilerCallback7_ProfilerDetachSucceeded(This) \
+ ( (This)->lpVtbl -> ProfilerDetachSucceeded(This) )
+
+
+#define ICorProfilerCallback7_ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> ReJITCompilationStarted(This,functionId,rejitId,fIsSafeToBlock) )
+
+#define ICorProfilerCallback7_GetReJITParameters(This,moduleId,methodId,pFunctionControl) \
+ ( (This)->lpVtbl -> GetReJITParameters(This,moduleId,methodId,pFunctionControl) )
+
+#define ICorProfilerCallback7_ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) \
+ ( (This)->lpVtbl -> ReJITCompilationFinished(This,functionId,rejitId,hrStatus,fIsSafeToBlock) )
+
+#define ICorProfilerCallback7_ReJITError(This,moduleId,methodId,functionId,hrStatus) \
+ ( (This)->lpVtbl -> ReJITError(This,moduleId,methodId,functionId,hrStatus) )
+
+#define ICorProfilerCallback7_MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) \
+ ( (This)->lpVtbl -> MovedReferences2(This,cMovedObjectIDRanges,oldObjectIDRangeStart,newObjectIDRangeStart,cObjectIDRangeLength) )
+
+#define ICorProfilerCallback7_SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) \
+ ( (This)->lpVtbl -> SurvivingReferences2(This,cSurvivingObjectIDRanges,objectIDRangeStart,cObjectIDRangeLength) )
+
+
+#define ICorProfilerCallback7_ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) \
+ ( (This)->lpVtbl -> ConditionalWeakTableElementReferences(This,cRootRefs,keyRefIds,valueRefIds,rootIds) )
+
+
+#define ICorProfilerCallback7_GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) \
+ ( (This)->lpVtbl -> GetAssemblyReferences(This,wszAssemblyPath,pAsmRefProvider) )
+
+
+#define ICorProfilerCallback7_ModuleInMemorySymbolsUpdated(This,moduleId) \
+ ( (This)->lpVtbl -> ModuleInMemorySymbolsUpdated(This,moduleId) )
+
+#endif /* COBJMACROS */
+
+
+#endif /* C style interface */
+
+
+
+
+#endif /* __ICorProfilerCallback7_INTERFACE_DEFINED__ */
+
+
+/* interface __MIDL_itf_corprof_0000_0007 */
/* [local] */
typedef /* [public] */
-enum __MIDL___MIDL_itf_corprof_0000_0006_0001
+enum __MIDL___MIDL_itf_corprof_0000_0007_0001
{
COR_PRF_CODEGEN_DISABLE_INLINING = 0x1,
COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS = 0x2
@@ -5049,8 +5820,8 @@ enum __MIDL___MIDL_itf_corprof_0000_0006_0001
-extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0006_v0_0_c_ifspec;
-extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0006_v0_0_s_ifspec;
+extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0007_v0_0_c_ifspec;
+extern RPC_IF_HANDLE __MIDL_itf_corprof_0000_0007_v0_0_s_ifspec;
#ifndef __ICorProfilerInfo_INTERFACE_DEFINED__
#define __ICorProfilerInfo_INTERFACE_DEFINED__
@@ -10079,6 +10850,869 @@ EXTERN_C const IID IID_ICorProfilerInfo6;
#endif /* __ICorProfilerInfo6_INTERFACE_DEFINED__ */
+#ifndef __ICorProfilerInfo7_INTERFACE_DEFINED__
+#define __ICorProfilerInfo7_INTERFACE_DEFINED__
+
+/* interface ICorProfilerInfo7 */
+/* [local][unique][uuid][object] */
+
+
+EXTERN_C const IID IID_ICorProfilerInfo7;
+
+#if defined(__cplusplus) && !defined(CINTERFACE)
+
+ MIDL_INTERFACE("9AEECC0D-63E0-4187-8C00-E312F503F663")
+ ICorProfilerInfo7 : public ICorProfilerInfo6
+ {
+ public:
+ virtual HRESULT STDMETHODCALLTYPE ApplyMetaData(
+ /* [in] */ ModuleID moduleId) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE GetInMemorySymbolsLength(
+ /* [in] */ ModuleID moduleId,
+ /* [out] */ DWORD *pCountSymbolBytes) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE ReadInMemorySymbols(
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ DWORD symbolsReadOffset,
+ /* [out] */ BYTE *pSymbolBytes,
+ /* [in] */ DWORD countSymbolBytes,
+ /* [out] */ DWORD *pCountSymbolBytesRead) = 0;
+
+ };
+
+
+#else /* C style interface */
+
+ typedef struct ICorProfilerInfo7Vtbl
+ {
+ BEGIN_INTERFACE
+
+ HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ REFIID riid,
+ /* [annotation][iid_is][out] */
+ _COM_Outptr_ void **ppvObject);
+
+ ULONG ( STDMETHODCALLTYPE *AddRef )(
+ ICorProfilerInfo7 * This);
+
+ ULONG ( STDMETHODCALLTYPE *Release )(
+ ICorProfilerInfo7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *GetClassFromObject )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ObjectID objectId,
+ /* [out] */ ClassID *pClassId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetClassFromToken )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdTypeDef typeDef,
+ /* [out] */ ClassID *pClassId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetCodeInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [out] */ LPCBYTE *pStart,
+ /* [out] */ ULONG *pcSize);
+
+ HRESULT ( STDMETHODCALLTYPE *GetEventMask )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ DWORD *pdwEvents);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ LPCBYTE ip,
+ /* [out] */ FunctionID *pFunctionId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionFromToken )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdToken token,
+ /* [out] */ FunctionID *pFunctionId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetHandleFromThread )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ThreadID threadId,
+ /* [out] */ HANDLE *phThread);
+
+ HRESULT ( STDMETHODCALLTYPE *GetObjectSize )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ObjectID objectId,
+ /* [out] */ ULONG *pcSize);
+
+ HRESULT ( STDMETHODCALLTYPE *IsArrayClass )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [out] */ CorElementType *pBaseElemType,
+ /* [out] */ ClassID *pBaseClassId,
+ /* [out] */ ULONG *pcRank);
+
+ HRESULT ( STDMETHODCALLTYPE *GetThreadInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ThreadID threadId,
+ /* [out] */ DWORD *pdwWin32ThreadId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetCurrentThreadID )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ ThreadID *pThreadId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [out] */ ModuleID *pModuleId,
+ /* [out] */ mdTypeDef *pTypeDefToken);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [out] */ ClassID *pClassId,
+ /* [out] */ ModuleID *pModuleId,
+ /* [out] */ mdToken *pToken);
+
+ HRESULT ( STDMETHODCALLTYPE *SetEventMask )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ DWORD dwEvents);
+
+ HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionEnter *pFuncEnter,
+ /* [in] */ FunctionLeave *pFuncLeave,
+ /* [in] */ FunctionTailcall *pFuncTailcall);
+
+ HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionIDMapper *pFunc);
+
+ HRESULT ( STDMETHODCALLTYPE *GetTokenAndMetaDataFromFunction )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ REFIID riid,
+ /* [out] */ IUnknown **ppImport,
+ /* [out] */ mdToken *pToken);
+
+ HRESULT ( STDMETHODCALLTYPE *GetModuleInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [out] */ LPCBYTE *ppBaseLoadAddress,
+ /* [in] */ ULONG cchName,
+ /* [out] */ ULONG *pcchName,
+ /* [annotation][out] */
+ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ],
+ /* [out] */ AssemblyID *pAssemblyId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetModuleMetaData )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ DWORD dwOpenFlags,
+ /* [in] */ REFIID riid,
+ /* [out] */ IUnknown **ppOut);
+
+ HRESULT ( STDMETHODCALLTYPE *GetILFunctionBody )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodId,
+ /* [out] */ LPCBYTE *ppMethodHeader,
+ /* [out] */ ULONG *pcbMethodSize);
+
+ HRESULT ( STDMETHODCALLTYPE *GetILFunctionBodyAllocator )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [out] */ IMethodMalloc **ppMalloc);
+
+ HRESULT ( STDMETHODCALLTYPE *SetILFunctionBody )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ mdMethodDef methodid,
+ /* [in] */ LPCBYTE pbNewILMethodHeader);
+
+ HRESULT ( STDMETHODCALLTYPE *GetAppDomainInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ AppDomainID appDomainId,
+ /* [in] */ ULONG cchName,
+ /* [out] */ ULONG *pcchName,
+ /* [annotation][out] */
+ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ],
+ /* [out] */ ProcessID *pProcessId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetAssemblyInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ AssemblyID assemblyId,
+ /* [in] */ ULONG cchName,
+ /* [out] */ ULONG *pcchName,
+ /* [annotation][out] */
+ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ],
+ /* [out] */ AppDomainID *pAppDomainId,
+ /* [out] */ ModuleID *pModuleId);
+
+ HRESULT ( STDMETHODCALLTYPE *SetFunctionReJIT )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId);
+
+ HRESULT ( STDMETHODCALLTYPE *ForceGC )(
+ ICorProfilerInfo7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *SetILInstrumentedCodeMap )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ BOOL fStartJit,
+ /* [in] */ ULONG cILMapEntries,
+ /* [size_is][in] */ COR_IL_MAP rgILMapEntries[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionInterface )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ IUnknown **ppicd);
+
+ HRESULT ( STDMETHODCALLTYPE *GetInprocInspectionIThisThread )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ IUnknown **ppicd);
+
+ HRESULT ( STDMETHODCALLTYPE *GetThreadContext )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ThreadID threadId,
+ /* [out] */ ContextID *pContextId);
+
+ HRESULT ( STDMETHODCALLTYPE *BeginInprocDebugging )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ BOOL fThisThreadOnly,
+ /* [out] */ DWORD *pdwProfilerContext);
+
+ HRESULT ( STDMETHODCALLTYPE *EndInprocDebugging )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ DWORD dwProfilerContext);
+
+ HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ULONG32 cMap,
+ /* [out] */ ULONG32 *pcMap,
+ /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *DoStackSnapshot )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ThreadID thread,
+ /* [in] */ StackSnapshotCallback *callback,
+ /* [in] */ ULONG32 infoFlags,
+ /* [in] */ void *clientData,
+ /* [size_is][in] */ BYTE context[ ],
+ /* [in] */ ULONG32 contextSize);
+
+ HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionEnter2 *pFuncEnter,
+ /* [in] */ FunctionLeave2 *pFuncLeave,
+ /* [in] */ FunctionTailcall2 *pFuncTailcall);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionInfo2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID funcId,
+ /* [in] */ COR_PRF_FRAME_INFO frameInfo,
+ /* [out] */ ClassID *pClassId,
+ /* [out] */ ModuleID *pModuleId,
+ /* [out] */ mdToken *pToken,
+ /* [in] */ ULONG32 cTypeArgs,
+ /* [out] */ ULONG32 *pcTypeArgs,
+ /* [out] */ ClassID typeArgs[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetStringLayout )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ ULONG *pBufferLengthOffset,
+ /* [out] */ ULONG *pStringLengthOffset,
+ /* [out] */ ULONG *pBufferOffset);
+
+ HRESULT ( STDMETHODCALLTYPE *GetClassLayout )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classID,
+ /* [out][in] */ COR_FIELD_OFFSET rFieldOffset[ ],
+ /* [in] */ ULONG cFieldOffset,
+ /* [out] */ ULONG *pcFieldOffset,
+ /* [out] */ ULONG *pulClassSize);
+
+ HRESULT ( STDMETHODCALLTYPE *GetClassIDInfo2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [out] */ ModuleID *pModuleId,
+ /* [out] */ mdTypeDef *pTypeDefToken,
+ /* [out] */ ClassID *pParentClassId,
+ /* [in] */ ULONG32 cNumTypeArgs,
+ /* [out] */ ULONG32 *pcNumTypeArgs,
+ /* [out] */ ClassID typeArgs[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetCodeInfo2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionID,
+ /* [in] */ ULONG32 cCodeInfos,
+ /* [out] */ ULONG32 *pcCodeInfos,
+ /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetClassFromTokenAndTypeArgs )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleID,
+ /* [in] */ mdTypeDef typeDef,
+ /* [in] */ ULONG32 cTypeArgs,
+ /* [size_is][in] */ ClassID typeArgs[ ],
+ /* [out] */ ClassID *pClassID);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionFromTokenAndTypeArgs )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleID,
+ /* [in] */ mdMethodDef funcDef,
+ /* [in] */ ClassID classId,
+ /* [in] */ ULONG32 cTypeArgs,
+ /* [size_is][in] */ ClassID typeArgs[ ],
+ /* [out] */ FunctionID *pFunctionID);
+
+ HRESULT ( STDMETHODCALLTYPE *EnumModuleFrozenObjects )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleID,
+ /* [out] */ ICorProfilerObjectEnum **ppEnum);
+
+ HRESULT ( STDMETHODCALLTYPE *GetArrayObjectInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ObjectID objectId,
+ /* [in] */ ULONG32 cDimensions,
+ /* [size_is][out] */ ULONG32 pDimensionSizes[ ],
+ /* [size_is][out] */ int pDimensionLowerBounds[ ],
+ /* [out] */ BYTE **ppData);
+
+ HRESULT ( STDMETHODCALLTYPE *GetBoxClassLayout )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [out] */ ULONG32 *pBufferOffset);
+
+ HRESULT ( STDMETHODCALLTYPE *GetThreadAppDomain )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ThreadID threadId,
+ /* [out] */ AppDomainID *pAppDomainId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetRVAStaticAddress )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ mdFieldDef fieldToken,
+ /* [out] */ void **ppAddress);
+
+ HRESULT ( STDMETHODCALLTYPE *GetAppDomainStaticAddress )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ mdFieldDef fieldToken,
+ /* [in] */ AppDomainID appDomainId,
+ /* [out] */ void **ppAddress);
+
+ HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ mdFieldDef fieldToken,
+ /* [in] */ ThreadID threadId,
+ /* [out] */ void **ppAddress);
+
+ HRESULT ( STDMETHODCALLTYPE *GetContextStaticAddress )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ mdFieldDef fieldToken,
+ /* [in] */ ContextID contextId,
+ /* [out] */ void **ppAddress);
+
+ HRESULT ( STDMETHODCALLTYPE *GetStaticFieldInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ mdFieldDef fieldToken,
+ /* [out] */ COR_PRF_STATIC_TYPE *pFieldInfo);
+
+ HRESULT ( STDMETHODCALLTYPE *GetGenerationBounds )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ULONG cObjectRanges,
+ /* [out] */ ULONG *pcObjectRanges,
+ /* [length_is][size_is][out] */ COR_PRF_GC_GENERATION_RANGE ranges[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetObjectGeneration )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ObjectID objectId,
+ /* [out] */ COR_PRF_GC_GENERATION_RANGE *range);
+
+ HRESULT ( STDMETHODCALLTYPE *GetNotifiedExceptionClauseInfo )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ COR_PRF_EX_CLAUSE_INFO *pinfo);
+
+ HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ ICorProfilerFunctionEnum **ppEnum);
+
+ HRESULT ( STDMETHODCALLTYPE *RequestProfilerDetach )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ DWORD dwExpectedCompletionMilliseconds);
+
+ HRESULT ( STDMETHODCALLTYPE *SetFunctionIDMapper2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionIDMapper2 *pFunc,
+ /* [in] */ void *clientData);
+
+ HRESULT ( STDMETHODCALLTYPE *GetStringLayout2 )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ ULONG *pStringLengthOffset,
+ /* [out] */ ULONG *pBufferOffset);
+
+ HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionEnter3 *pFuncEnter3,
+ /* [in] */ FunctionLeave3 *pFuncLeave3,
+ /* [in] */ FunctionTailcall3 *pFuncTailcall3);
+
+ HRESULT ( STDMETHODCALLTYPE *SetEnterLeaveFunctionHooks3WithInfo )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionEnter3WithInfo *pFuncEnter3WithInfo,
+ /* [in] */ FunctionLeave3WithInfo *pFuncLeave3WithInfo,
+ /* [in] */ FunctionTailcall3WithInfo *pFuncTailcall3WithInfo);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionEnter3Info )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_ELT_INFO eltInfo,
+ /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo,
+ /* [out][in] */ ULONG *pcbArgumentInfo,
+ /* [size_is][out] */ COR_PRF_FUNCTION_ARGUMENT_INFO *pArgumentInfo);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionLeave3Info )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_ELT_INFO eltInfo,
+ /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo,
+ /* [out] */ COR_PRF_FUNCTION_ARGUMENT_RANGE *pRetvalRange);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionTailcall3Info )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ COR_PRF_ELT_INFO eltInfo,
+ /* [out] */ COR_PRF_FRAME_INFO *pFrameInfo);
+
+ HRESULT ( STDMETHODCALLTYPE *EnumModules )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ ICorProfilerModuleEnum **ppEnum);
+
+ HRESULT ( STDMETHODCALLTYPE *GetRuntimeInformation )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ USHORT *pClrInstanceId,
+ /* [out] */ COR_PRF_RUNTIME_TYPE *pRuntimeType,
+ /* [out] */ USHORT *pMajorVersion,
+ /* [out] */ USHORT *pMinorVersion,
+ /* [out] */ USHORT *pBuildNumber,
+ /* [out] */ USHORT *pQFEVersion,
+ /* [in] */ ULONG cchVersionString,
+ /* [out] */ ULONG *pcchVersionString,
+ /* [annotation][out] */
+ _Out_writes_to_(cchVersionString, *pcchVersionString) WCHAR szVersionString[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetThreadStaticAddress2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ClassID classId,
+ /* [in] */ mdFieldDef fieldToken,
+ /* [in] */ AppDomainID appDomainId,
+ /* [in] */ ThreadID threadId,
+ /* [out] */ void **ppAddress);
+
+ HRESULT ( STDMETHODCALLTYPE *GetAppDomainsContainingModule )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ ULONG32 cAppDomainIds,
+ /* [out] */ ULONG32 *pcAppDomainIds,
+ /* [length_is][size_is][out] */ AppDomainID appDomainIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetModuleInfo2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [out] */ LPCBYTE *ppBaseLoadAddress,
+ /* [in] */ ULONG cchName,
+ /* [out] */ ULONG *pcchName,
+ /* [annotation][out] */
+ _Out_writes_to_(cchName, *pcchName) WCHAR szName[ ],
+ /* [out] */ AssemblyID *pAssemblyId,
+ /* [out] */ DWORD *pdwModuleFlags);
+
+ HRESULT ( STDMETHODCALLTYPE *EnumThreads )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ ICorProfilerThreadEnum **ppEnum);
+
+ HRESULT ( STDMETHODCALLTYPE *InitializeCurrentThread )(
+ ICorProfilerInfo7 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *RequestReJIT )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ULONG cFunctions,
+ /* [size_is][in] */ ModuleID moduleIds[ ],
+ /* [size_is][in] */ mdMethodDef methodIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *RequestRevert )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ULONG cFunctions,
+ /* [size_is][in] */ ModuleID moduleIds[ ],
+ /* [size_is][in] */ mdMethodDef methodIds[ ],
+ /* [size_is][out] */ HRESULT status[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetCodeInfo3 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionID,
+ /* [in] */ ReJITID reJitId,
+ /* [in] */ ULONG32 cCodeInfos,
+ /* [out] */ ULONG32 *pcCodeInfos,
+ /* [length_is][size_is][out] */ COR_PRF_CODE_INFO codeInfos[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetFunctionFromIP2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ LPCBYTE ip,
+ /* [out] */ FunctionID *pFunctionId,
+ /* [out] */ ReJITID *pReJitId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetReJITIDs )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ULONG cReJitIds,
+ /* [out] */ ULONG *pcReJitIds,
+ /* [length_is][size_is][out] */ ReJITID reJitIds[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *GetILToNativeMapping2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ FunctionID functionId,
+ /* [in] */ ReJITID reJitId,
+ /* [in] */ ULONG32 cMap,
+ /* [out] */ ULONG32 *pcMap,
+ /* [length_is][size_is][out] */ COR_DEBUG_IL_TO_NATIVE_MAP map[ ]);
+
+ HRESULT ( STDMETHODCALLTYPE *EnumJITedFunctions2 )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ ICorProfilerFunctionEnum **ppEnum);
+
+ HRESULT ( STDMETHODCALLTYPE *GetObjectSize2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ObjectID objectId,
+ /* [out] */ SIZE_T *pcSize);
+
+ HRESULT ( STDMETHODCALLTYPE *GetEventMask2 )(
+ ICorProfilerInfo7 * This,
+ /* [out] */ DWORD *pdwEventsLow,
+ /* [out] */ DWORD *pdwEventsHigh);
+
+ HRESULT ( STDMETHODCALLTYPE *SetEventMask2 )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ DWORD dwEventsLow,
+ /* [in] */ DWORD dwEventsHigh);
+
+ HRESULT ( STDMETHODCALLTYPE *EnumNgenModuleMethodsInliningThisMethod )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID inlinersModuleId,
+ /* [in] */ ModuleID inlineeModuleId,
+ /* [in] */ mdMethodDef inlineeMethodId,
+ /* [out] */ BOOL *incompleteData,
+ /* [out] */ ICorProfilerMethodEnum **ppEnum);
+
+ HRESULT ( STDMETHODCALLTYPE *ApplyMetaData )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId);
+
+ HRESULT ( STDMETHODCALLTYPE *GetInMemorySymbolsLength )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [out] */ DWORD *pCountSymbolBytes);
+
+ HRESULT ( STDMETHODCALLTYPE *ReadInMemorySymbols )(
+ ICorProfilerInfo7 * This,
+ /* [in] */ ModuleID moduleId,
+ /* [in] */ DWORD symbolsReadOffset,
+ /* [out] */ BYTE *pSymbolBytes,
+ /* [in] */ DWORD countSymbolBytes,
+ /* [out] */ DWORD *pCountSymbolBytesRead);
+
+ END_INTERFACE
+ } ICorProfilerInfo7Vtbl;
+
+ interface ICorProfilerInfo7
+ {
+ CONST_VTBL struct ICorProfilerInfo7Vtbl *lpVtbl;
+ };
+
+
+
+#ifdef COBJMACROS
+
+
+#define ICorProfilerInfo7_QueryInterface(This,riid,ppvObject) \
+ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
+
+#define ICorProfilerInfo7_AddRef(This) \
+ ( (This)->lpVtbl -> AddRef(This) )
+
+#define ICorProfilerInfo7_Release(This) \
+ ( (This)->lpVtbl -> Release(This) )
+
+
+#define ICorProfilerInfo7_GetClassFromObject(This,objectId,pClassId) \
+ ( (This)->lpVtbl -> GetClassFromObject(This,objectId,pClassId) )
+
+#define ICorProfilerInfo7_GetClassFromToken(This,moduleId,typeDef,pClassId) \
+ ( (This)->lpVtbl -> GetClassFromToken(This,moduleId,typeDef,pClassId) )
+
+#define ICorProfilerInfo7_GetCodeInfo(This,functionId,pStart,pcSize) \
+ ( (This)->lpVtbl -> GetCodeInfo(This,functionId,pStart,pcSize) )
+
+#define ICorProfilerInfo7_GetEventMask(This,pdwEvents) \
+ ( (This)->lpVtbl -> GetEventMask(This,pdwEvents) )
+
+#define ICorProfilerInfo7_GetFunctionFromIP(This,ip,pFunctionId) \
+ ( (This)->lpVtbl -> GetFunctionFromIP(This,ip,pFunctionId) )
+
+#define ICorProfilerInfo7_GetFunctionFromToken(This,moduleId,token,pFunctionId) \
+ ( (This)->lpVtbl -> GetFunctionFromToken(This,moduleId,token,pFunctionId) )
+
+#define ICorProfilerInfo7_GetHandleFromThread(This,threadId,phThread) \
+ ( (This)->lpVtbl -> GetHandleFromThread(This,threadId,phThread) )
+
+#define ICorProfilerInfo7_GetObjectSize(This,objectId,pcSize) \
+ ( (This)->lpVtbl -> GetObjectSize(This,objectId,pcSize) )
+
+#define ICorProfilerInfo7_IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) \
+ ( (This)->lpVtbl -> IsArrayClass(This,classId,pBaseElemType,pBaseClassId,pcRank) )
+
+#define ICorProfilerInfo7_GetThreadInfo(This,threadId,pdwWin32ThreadId) \
+ ( (This)->lpVtbl -> GetThreadInfo(This,threadId,pdwWin32ThreadId) )
+
+#define ICorProfilerInfo7_GetCurrentThreadID(This,pThreadId) \
+ ( (This)->lpVtbl -> GetCurrentThreadID(This,pThreadId) )
+
+#define ICorProfilerInfo7_GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) \
+ ( (This)->lpVtbl -> GetClassIDInfo(This,classId,pModuleId,pTypeDefToken) )
+
+#define ICorProfilerInfo7_GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) \
+ ( (This)->lpVtbl -> GetFunctionInfo(This,functionId,pClassId,pModuleId,pToken) )
+
+#define ICorProfilerInfo7_SetEventMask(This,dwEvents) \
+ ( (This)->lpVtbl -> SetEventMask(This,dwEvents) )
+
+#define ICorProfilerInfo7_SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
+ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
+
+#define ICorProfilerInfo7_SetFunctionIDMapper(This,pFunc) \
+ ( (This)->lpVtbl -> SetFunctionIDMapper(This,pFunc) )
+
+#define ICorProfilerInfo7_GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) \
+ ( (This)->lpVtbl -> GetTokenAndMetaDataFromFunction(This,functionId,riid,ppImport,pToken) )
+
+#define ICorProfilerInfo7_GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) \
+ ( (This)->lpVtbl -> GetModuleInfo(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId) )
+
+#define ICorProfilerInfo7_GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) \
+ ( (This)->lpVtbl -> GetModuleMetaData(This,moduleId,dwOpenFlags,riid,ppOut) )
+
+#define ICorProfilerInfo7_GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) \
+ ( (This)->lpVtbl -> GetILFunctionBody(This,moduleId,methodId,ppMethodHeader,pcbMethodSize) )
+
+#define ICorProfilerInfo7_GetILFunctionBodyAllocator(This,moduleId,ppMalloc) \
+ ( (This)->lpVtbl -> GetILFunctionBodyAllocator(This,moduleId,ppMalloc) )
+
+#define ICorProfilerInfo7_SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) \
+ ( (This)->lpVtbl -> SetILFunctionBody(This,moduleId,methodid,pbNewILMethodHeader) )
+
+#define ICorProfilerInfo7_GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) \
+ ( (This)->lpVtbl -> GetAppDomainInfo(This,appDomainId,cchName,pcchName,szName,pProcessId) )
+
+#define ICorProfilerInfo7_GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) \
+ ( (This)->lpVtbl -> GetAssemblyInfo(This,assemblyId,cchName,pcchName,szName,pAppDomainId,pModuleId) )
+
+#define ICorProfilerInfo7_SetFunctionReJIT(This,functionId) \
+ ( (This)->lpVtbl -> SetFunctionReJIT(This,functionId) )
+
+#define ICorProfilerInfo7_ForceGC(This) \
+ ( (This)->lpVtbl -> ForceGC(This) )
+
+#define ICorProfilerInfo7_SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) \
+ ( (This)->lpVtbl -> SetILInstrumentedCodeMap(This,functionId,fStartJit,cILMapEntries,rgILMapEntries) )
+
+#define ICorProfilerInfo7_GetInprocInspectionInterface(This,ppicd) \
+ ( (This)->lpVtbl -> GetInprocInspectionInterface(This,ppicd) )
+
+#define ICorProfilerInfo7_GetInprocInspectionIThisThread(This,ppicd) \
+ ( (This)->lpVtbl -> GetInprocInspectionIThisThread(This,ppicd) )
+
+#define ICorProfilerInfo7_GetThreadContext(This,threadId,pContextId) \
+ ( (This)->lpVtbl -> GetThreadContext(This,threadId,pContextId) )
+
+#define ICorProfilerInfo7_BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) \
+ ( (This)->lpVtbl -> BeginInprocDebugging(This,fThisThreadOnly,pdwProfilerContext) )
+
+#define ICorProfilerInfo7_EndInprocDebugging(This,dwProfilerContext) \
+ ( (This)->lpVtbl -> EndInprocDebugging(This,dwProfilerContext) )
+
+#define ICorProfilerInfo7_GetILToNativeMapping(This,functionId,cMap,pcMap,map) \
+ ( (This)->lpVtbl -> GetILToNativeMapping(This,functionId,cMap,pcMap,map) )
+
+
+#define ICorProfilerInfo7_DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) \
+ ( (This)->lpVtbl -> DoStackSnapshot(This,thread,callback,infoFlags,clientData,context,contextSize) )
+
+#define ICorProfilerInfo7_SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) \
+ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks2(This,pFuncEnter,pFuncLeave,pFuncTailcall) )
+
+#define ICorProfilerInfo7_GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) \
+ ( (This)->lpVtbl -> GetFunctionInfo2(This,funcId,frameInfo,pClassId,pModuleId,pToken,cTypeArgs,pcTypeArgs,typeArgs) )
+
+#define ICorProfilerInfo7_GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) \
+ ( (This)->lpVtbl -> GetStringLayout(This,pBufferLengthOffset,pStringLengthOffset,pBufferOffset) )
+
+#define ICorProfilerInfo7_GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) \
+ ( (This)->lpVtbl -> GetClassLayout(This,classID,rFieldOffset,cFieldOffset,pcFieldOffset,pulClassSize) )
+
+#define ICorProfilerInfo7_GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) \
+ ( (This)->lpVtbl -> GetClassIDInfo2(This,classId,pModuleId,pTypeDefToken,pParentClassId,cNumTypeArgs,pcNumTypeArgs,typeArgs) )
+
+#define ICorProfilerInfo7_GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) \
+ ( (This)->lpVtbl -> GetCodeInfo2(This,functionID,cCodeInfos,pcCodeInfos,codeInfos) )
+
+#define ICorProfilerInfo7_GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) \
+ ( (This)->lpVtbl -> GetClassFromTokenAndTypeArgs(This,moduleID,typeDef,cTypeArgs,typeArgs,pClassID) )
+
+#define ICorProfilerInfo7_GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) \
+ ( (This)->lpVtbl -> GetFunctionFromTokenAndTypeArgs(This,moduleID,funcDef,classId,cTypeArgs,typeArgs,pFunctionID) )
+
+#define ICorProfilerInfo7_EnumModuleFrozenObjects(This,moduleID,ppEnum) \
+ ( (This)->lpVtbl -> EnumModuleFrozenObjects(This,moduleID,ppEnum) )
+
+#define ICorProfilerInfo7_GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) \
+ ( (This)->lpVtbl -> GetArrayObjectInfo(This,objectId,cDimensions,pDimensionSizes,pDimensionLowerBounds,ppData) )
+
+#define ICorProfilerInfo7_GetBoxClassLayout(This,classId,pBufferOffset) \
+ ( (This)->lpVtbl -> GetBoxClassLayout(This,classId,pBufferOffset) )
+
+#define ICorProfilerInfo7_GetThreadAppDomain(This,threadId,pAppDomainId) \
+ ( (This)->lpVtbl -> GetThreadAppDomain(This,threadId,pAppDomainId) )
+
+#define ICorProfilerInfo7_GetRVAStaticAddress(This,classId,fieldToken,ppAddress) \
+ ( (This)->lpVtbl -> GetRVAStaticAddress(This,classId,fieldToken,ppAddress) )
+
+#define ICorProfilerInfo7_GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) \
+ ( (This)->lpVtbl -> GetAppDomainStaticAddress(This,classId,fieldToken,appDomainId,ppAddress) )
+
+#define ICorProfilerInfo7_GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) \
+ ( (This)->lpVtbl -> GetThreadStaticAddress(This,classId,fieldToken,threadId,ppAddress) )
+
+#define ICorProfilerInfo7_GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) \
+ ( (This)->lpVtbl -> GetContextStaticAddress(This,classId,fieldToken,contextId,ppAddress) )
+
+#define ICorProfilerInfo7_GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) \
+ ( (This)->lpVtbl -> GetStaticFieldInfo(This,classId,fieldToken,pFieldInfo) )
+
+#define ICorProfilerInfo7_GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) \
+ ( (This)->lpVtbl -> GetGenerationBounds(This,cObjectRanges,pcObjectRanges,ranges) )
+
+#define ICorProfilerInfo7_GetObjectGeneration(This,objectId,range) \
+ ( (This)->lpVtbl -> GetObjectGeneration(This,objectId,range) )
+
+#define ICorProfilerInfo7_GetNotifiedExceptionClauseInfo(This,pinfo) \
+ ( (This)->lpVtbl -> GetNotifiedExceptionClauseInfo(This,pinfo) )
+
+
+#define ICorProfilerInfo7_EnumJITedFunctions(This,ppEnum) \
+ ( (This)->lpVtbl -> EnumJITedFunctions(This,ppEnum) )
+
+#define ICorProfilerInfo7_RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) \
+ ( (This)->lpVtbl -> RequestProfilerDetach(This,dwExpectedCompletionMilliseconds) )
+
+#define ICorProfilerInfo7_SetFunctionIDMapper2(This,pFunc,clientData) \
+ ( (This)->lpVtbl -> SetFunctionIDMapper2(This,pFunc,clientData) )
+
+#define ICorProfilerInfo7_GetStringLayout2(This,pStringLengthOffset,pBufferOffset) \
+ ( (This)->lpVtbl -> GetStringLayout2(This,pStringLengthOffset,pBufferOffset) )
+
+#define ICorProfilerInfo7_SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) \
+ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3(This,pFuncEnter3,pFuncLeave3,pFuncTailcall3) )
+
+#define ICorProfilerInfo7_SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) \
+ ( (This)->lpVtbl -> SetEnterLeaveFunctionHooks3WithInfo(This,pFuncEnter3WithInfo,pFuncLeave3WithInfo,pFuncTailcall3WithInfo) )
+
+#define ICorProfilerInfo7_GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) \
+ ( (This)->lpVtbl -> GetFunctionEnter3Info(This,functionId,eltInfo,pFrameInfo,pcbArgumentInfo,pArgumentInfo) )
+
+#define ICorProfilerInfo7_GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) \
+ ( (This)->lpVtbl -> GetFunctionLeave3Info(This,functionId,eltInfo,pFrameInfo,pRetvalRange) )
+
+#define ICorProfilerInfo7_GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) \
+ ( (This)->lpVtbl -> GetFunctionTailcall3Info(This,functionId,eltInfo,pFrameInfo) )
+
+#define ICorProfilerInfo7_EnumModules(This,ppEnum) \
+ ( (This)->lpVtbl -> EnumModules(This,ppEnum) )
+
+#define ICorProfilerInfo7_GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) \
+ ( (This)->lpVtbl -> GetRuntimeInformation(This,pClrInstanceId,pRuntimeType,pMajorVersion,pMinorVersion,pBuildNumber,pQFEVersion,cchVersionString,pcchVersionString,szVersionString) )
+
+#define ICorProfilerInfo7_GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) \
+ ( (This)->lpVtbl -> GetThreadStaticAddress2(This,classId,fieldToken,appDomainId,threadId,ppAddress) )
+
+#define ICorProfilerInfo7_GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) \
+ ( (This)->lpVtbl -> GetAppDomainsContainingModule(This,moduleId,cAppDomainIds,pcAppDomainIds,appDomainIds) )
+
+#define ICorProfilerInfo7_GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) \
+ ( (This)->lpVtbl -> GetModuleInfo2(This,moduleId,ppBaseLoadAddress,cchName,pcchName,szName,pAssemblyId,pdwModuleFlags) )
+
+
+#define ICorProfilerInfo7_EnumThreads(This,ppEnum) \
+ ( (This)->lpVtbl -> EnumThreads(This,ppEnum) )
+
+#define ICorProfilerInfo7_InitializeCurrentThread(This) \
+ ( (This)->lpVtbl -> InitializeCurrentThread(This) )
+
+#define ICorProfilerInfo7_RequestReJIT(This,cFunctions,moduleIds,methodIds) \
+ ( (This)->lpVtbl -> RequestReJIT(This,cFunctions,moduleIds,methodIds) )
+
+#define ICorProfilerInfo7_RequestRevert(This,cFunctions,moduleIds,methodIds,status) \
+ ( (This)->lpVtbl -> RequestRevert(This,cFunctions,moduleIds,methodIds,status) )
+
+#define ICorProfilerInfo7_GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) \
+ ( (This)->lpVtbl -> GetCodeInfo3(This,functionID,reJitId,cCodeInfos,pcCodeInfos,codeInfos) )
+
+#define ICorProfilerInfo7_GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) \
+ ( (This)->lpVtbl -> GetFunctionFromIP2(This,ip,pFunctionId,pReJitId) )
+
+#define ICorProfilerInfo7_GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) \
+ ( (This)->lpVtbl -> GetReJITIDs(This,functionId,cReJitIds,pcReJitIds,reJitIds) )
+
+#define ICorProfilerInfo7_GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) \
+ ( (This)->lpVtbl -> GetILToNativeMapping2(This,functionId,reJitId,cMap,pcMap,map) )
+
+#define ICorProfilerInfo7_EnumJITedFunctions2(This,ppEnum) \
+ ( (This)->lpVtbl -> EnumJITedFunctions2(This,ppEnum) )
+
+#define ICorProfilerInfo7_GetObjectSize2(This,objectId,pcSize) \
+ ( (This)->lpVtbl -> GetObjectSize2(This,objectId,pcSize) )
+
+
+#define ICorProfilerInfo7_GetEventMask2(This,pdwEventsLow,pdwEventsHigh) \
+ ( (This)->lpVtbl -> GetEventMask2(This,pdwEventsLow,pdwEventsHigh) )
+
+#define ICorProfilerInfo7_SetEventMask2(This,dwEventsLow,dwEventsHigh) \
+ ( (This)->lpVtbl -> SetEventMask2(This,dwEventsLow,dwEventsHigh) )
+
+
+#define ICorProfilerInfo7_EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) \
+ ( (This)->lpVtbl -> EnumNgenModuleMethodsInliningThisMethod(This,inlinersModuleId,inlineeModuleId,inlineeMethodId,incompleteData,ppEnum) )
+
+
+#define ICorProfilerInfo7_ApplyMetaData(This,moduleId) \
+ ( (This)->lpVtbl -> ApplyMetaData(This,moduleId) )
+
+#define ICorProfilerInfo7_GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) \
+ ( (This)->lpVtbl -> GetInMemorySymbolsLength(This,moduleId,pCountSymbolBytes) )
+
+#define ICorProfilerInfo7_ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) \
+ ( (This)->lpVtbl -> ReadInMemorySymbols(This,moduleId,symbolsReadOffset,pSymbolBytes,countSymbolBytes,pCountSymbolBytesRead) )
+
+#endif /* COBJMACROS */
+
+
+#endif /* C style interface */
+
+
+
+
+#endif /* __ICorProfilerInfo7_INTERFACE_DEFINED__ */
+
+
#ifndef __ICorProfilerMethodEnum_INTERFACE_DEFINED__
#define __ICorProfilerMethodEnum_INTERFACE_DEFINED__
diff --git a/src/pal/prebuilt/inc/sospriv.h b/src/pal/prebuilt/inc/sospriv.h
index aa42e44f64..b3826065bd 100644
--- a/src/pal/prebuilt/inc/sospriv.h
+++ b/src/pal/prebuilt/inc/sospriv.h
@@ -82,6 +82,13 @@ typedef interface ISOSDacInterface2 ISOSDacInterface2;
#endif /* __ISOSDacInterface2_FWD_DEFINED__ */
+#ifndef __ISOSDacInterface3_FWD_DEFINED__
+#define __ISOSDacInterface3_FWD_DEFINED__
+typedef interface ISOSDacInterface3 ISOSDacInterface3;
+
+#endif /* __ISOSDacInterface3_FWD_DEFINED__ */
+
+
/* header files for imported files */
#include "unknwn.h"
#include "xclrdata.h"
@@ -1981,6 +1988,108 @@ EXTERN_C const IID IID_ISOSDacInterface2;
#endif /* __ISOSDacInterface2_INTERFACE_DEFINED__ */
+#ifndef __ISOSDacInterface3_INTERFACE_DEFINED__
+#define __ISOSDacInterface3_INTERFACE_DEFINED__
+
+/* interface ISOSDacInterface3 */
+/* [uuid][local][object] */
+
+
+EXTERN_C const IID IID_ISOSDacInterface3;
+
+#if defined(__cplusplus) && !defined(CINTERFACE)
+
+ MIDL_INTERFACE("B08C5CDC-FD8A-49C5-AB38-5FEEF35235B4")
+ ISOSDacInterface3 : public IUnknown
+ {
+ public:
+ virtual HRESULT STDMETHODCALLTYPE GetGCInterestingInfoData(
+ CLRDATA_ADDRESS interestingInfoAddr,
+ struct DacpGCInterestingInfoData *data) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE GetGCInterestingInfoStaticData(
+ struct DacpGCInterestingInfoData *data) = 0;
+
+ virtual HRESULT STDMETHODCALLTYPE GetGCGlobalMechanisms(
+ size_t *globalMechanisms) = 0;
+
+ };
+
+
+#else /* C style interface */
+
+ typedef struct ISOSDacInterface3Vtbl
+ {
+ BEGIN_INTERFACE
+
+ HRESULT ( STDMETHODCALLTYPE *QueryInterface )(
+ ISOSDacInterface3 * This,
+ /* [in] */ REFIID riid,
+ /* [annotation][iid_is][out] */
+ _COM_Outptr_ void **ppvObject);
+
+ ULONG ( STDMETHODCALLTYPE *AddRef )(
+ ISOSDacInterface3 * This);
+
+ ULONG ( STDMETHODCALLTYPE *Release )(
+ ISOSDacInterface3 * This);
+
+ HRESULT ( STDMETHODCALLTYPE *GetGCInterestingInfoData )(
+ ISOSDacInterface3 * This,
+ CLRDATA_ADDRESS interestingInfoAddr,
+ struct DacpGCInterestingInfoData *data);
+
+ HRESULT ( STDMETHODCALLTYPE *GetGCInterestingInfoStaticData )(
+ ISOSDacInterface3 * This,
+ struct DacpGCInterestingInfoData *data);
+
+ HRESULT ( STDMETHODCALLTYPE *GetGCGlobalMechanisms )(
+ ISOSDacInterface3 * This,
+ size_t *globalMechanisms);
+
+ END_INTERFACE
+ } ISOSDacInterface3Vtbl;
+
+ interface ISOSDacInterface3
+ {
+ CONST_VTBL struct ISOSDacInterface3Vtbl *lpVtbl;
+ };
+
+
+
+#ifdef COBJMACROS
+
+
+#define ISOSDacInterface3_QueryInterface(This,riid,ppvObject) \
+ ( (This)->lpVtbl -> QueryInterface(This,riid,ppvObject) )
+
+#define ISOSDacInterface3_AddRef(This) \
+ ( (This)->lpVtbl -> AddRef(This) )
+
+#define ISOSDacInterface3_Release(This) \
+ ( (This)->lpVtbl -> Release(This) )
+
+
+#define ISOSDacInterface3_GetGCInterestingInfoData(This,interestingInfoAddr,data) \
+ ( (This)->lpVtbl -> GetGCInterestingInfoData(This,interestingInfoAddr,data) )
+
+#define ISOSDacInterface3_GetGCInterestingInfoStaticData(This,data) \
+ ( (This)->lpVtbl -> GetGCInterestingInfoStaticData(This,data) )
+
+#define ISOSDacInterface3_GetGCGlobalMechanisms(This,globalMechanisms) \
+ ( (This)->lpVtbl -> GetGCGlobalMechanisms(This,globalMechanisms) )
+
+#endif /* COBJMACROS */
+
+
+#endif /* C style interface */
+
+
+
+
+#endif /* __ISOSDacInterface3_INTERFACE_DEFINED__ */
+
+
/* Additional Prototypes for ALL interfaces */
/* end of Additional Prototypes */
diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
index 698e032217..ac8e3b9c2b 100644
--- a/src/vm/ceeload.cpp
+++ b/src/vm/ceeload.cpp
@@ -1505,6 +1505,30 @@ Module *Module::Create(Assembly *pAssembly, mdFile moduleRef, PEFile *file, Allo
RETURN pModuleSafe.Extract();
}
+void Module::ApplyMetaData()
+{
+    CONTRACTL
+    {
+        THROWS;
+        GC_NOTRIGGER;
+        MODE_ANY;
+    }
+    CONTRACTL_END;
+
+    LOG((LF_CLASSLOADER, LL_INFO100, "Module::ApplyMetaData %x\n", this));
+
+    // Pre-size this module's lookup maps for metadata rows added at runtime (e.g. by a profiler).
+    ULONG ulCount;
+
+    // Ensure the TypeRef map can store the highest TypeRef RID in the updated metadata.
+    ulCount = GetMDImport()->GetCountWithTokenKind(mdtTypeRef) + 1;
+    EnsureTypeRefCanBeStored(TokenFromRid(ulCount, mdtTypeRef));
+
+    // Ensure the AssemblyRef map can store the highest AssemblyRef RID in the updated metadata.
+    ulCount = GetMDImport()->GetCountWithTokenKind(mdtAssemblyRef) + 1;
+    EnsureAssemblyRefCanBeStored(TokenFromRid(ulCount, mdtAssemblyRef));
+}
+
//
// Destructor for Module
//
@@ -4467,7 +4491,16 @@ void Module::SetSymbolBytes(LPCBYTE pbSyms, DWORD cbSyms)
&cbWritten);
IfFailThrow(HRESULT_FROM_WIN32(dwError));
+#if PROFILING_SUPPORTED && !defined(CROSSGEN_COMPILE)
+ BEGIN_PIN_PROFILER(CORProfilerInMemorySymbolsUpdatesEnabled());
+ {
+ g_profControlBlock.pProfInterface->ModuleInMemorySymbolsUpdated((ModuleID) this);
+ }
+ END_PIN_PROFILER();
+#endif //PROFILING_SUPPORTED && !defined(CROSSGEN_COMPILE)
+
ETW::CodeSymbolLog::EmitCodeSymbols(this);
+
// Tell the debugger that symbols have been loaded for this
// module. We iterate through all domains which contain this
// module's assembly, and send a debugger notify for each one.
@@ -6229,21 +6262,17 @@ Module *Module::GetModuleIfLoaded(mdFile kFile, BOOL onlyLoadedInAppDomain, BOOL
if (!permitResources && pModule && pModule->IsResource())
pModule = NULL;
+#ifndef DACCESS_COMPILE
#if defined(FEATURE_MULTIMODULE_ASSEMBLIES)
// check if actually loaded, unless happens during GC (GC works only with loaded assemblies)
if (!GCHeap::IsGCInProgress() && onlyLoadedInAppDomain && pModule && !pModule->IsManifest())
{
-#ifndef DACCESS_COMPILE
DomainModule *pDomainModule = pModule->FindDomainModule(GetAppDomain());
if (pDomainModule == NULL || !pDomainModule->IsLoaded())
pModule = NULL;
-#else
- // unfortunately DAC doesn't have a GetAppDomain() however multi-module
- // assemblies aren't very common so it should be ok to fail here for now.
- DacNotImpl();
-#endif // !DACCESS_COMPILE
}
#endif // FEATURE_MULTIMODULE_ASSEMBLIES
+#endif // !DACCESS_COMPILE
RETURN pModule;
}
diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
index 1f762b3751..4fecdef22e 100644
--- a/src/vm/ceeload.h
+++ b/src/vm/ceeload.h
@@ -1942,6 +1942,8 @@ protected:
BOOL IsManifest();
+ void ApplyMetaData();
+
#ifdef FEATURE_MIXEDMODE
void FixupVTables();
#endif
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index 5e409600fa..5375d02b45 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -1161,6 +1161,9 @@ EEJitManager::EEJitManager()
m_jit = NULL;
m_JITCompiler = NULL;
#ifdef _TARGET_AMD64_
+ m_pEmergencyJumpStubReserveList = NULL;
+#endif
+#ifdef _TARGET_AMD64_
m_JITCompilerOther = NULL;
#endif
#ifdef ALLOW_SXS_JIT
@@ -1177,6 +1180,42 @@ EEJitManager::EEJitManager()
#if defined(_TARGET_AMD64_)
extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]);
extern "C" DWORD __stdcall xmmYmmStateSupport();
+
+bool DoesOSSupportAVX()
+{
+#ifndef FEATURE_PAL
+    // On Windows we have an api(GetEnabledXStateFeatures) to check if AVX is supported
+    typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)();
+    PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL;
+
+    // Probe ApiSet first
+    HMODULE hMod = WszLoadLibraryEx(W("api-ms-win-core-xstate-l2-1-0.dll"), NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
+
+    if (hMod == NULL)
+    {
+        // On older OS's where apiset is not present probe kernel32
+        hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
+        if (hMod == NULL)
+            return FALSE;
+    }
+
+    pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures");
+
+    if (pfnGetEnabledXStateFeatures == NULL)
+    {
+        return FALSE;
+    }
+
+    DWORD64 FeatureMask = pfnGetEnabledXStateFeatures();
+    if ((FeatureMask & XSTATE_MASK_AVX) == 0)
+    {
+        return FALSE;
+    }
+#endif // !FEATURE_PAL
+
+    return TRUE;
+}
+
#endif // defined(_TARGET_AMD64_)
void EEJitManager::SetCpuInfo()
@@ -1250,15 +1289,18 @@ void EEJitManager::SetCpuInfo()
}
if ((buffer[11] & 0x18) == 0x18)
{
- if (xmmYmmStateSupport() == 1)
+ if(DoesOSSupportAVX())
{
- dwCPUCompileFlags |= CORJIT_FLG_USE_AVX;
- if (maxCpuId >= 0x07)
+ if (xmmYmmStateSupport() == 1)
{
- (void) getcpuid(0x07, buffer);
- if ((buffer[4] & 0x20) != 0)
+ dwCPUCompileFlags |= CORJIT_FLG_USE_AVX;
+ if (maxCpuId >= 0x07)
{
- dwCPUCompileFlags |= CORJIT_FLG_USE_AVX2;
+ (void) getcpuid(0x07, buffer);
+ if ((buffer[4] & 0x20) != 0)
+ {
+ dwCPUCompileFlags |= CORJIT_FLG_USE_AVX2;
+ }
}
}
}
@@ -1801,9 +1843,138 @@ void ThrowOutOfMemoryWithinRange()
GC_NOTRIGGER;
} CONTRACTL_END;
+ // Allow breaking into debugger or terminating the process when this exception occurs
+ switch (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_BreakOnOutOfMemoryWithinRange))
+ {
+ case 1:
+ DebugBreak();
+ break;
+ case 2:
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_OUTOFMEMORY);
+ break;
+ default:
+ break;
+ }
+
EX_THROW(EEMessageException, (kOutOfMemoryException, IDS_EE_OUT_OF_MEMORY_WITHIN_RANGE));
}
+#ifdef _TARGET_AMD64_
+BYTE * EEJitManager::AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(m_CodeHeapCritSec.OwnedByCurrentThread());
+ } CONTRACTL_END;
+
+ for (EmergencyJumpStubReserve ** ppPrev = &m_pEmergencyJumpStubReserveList; *ppPrev != NULL; ppPrev = &(*ppPrev)->m_pNext)
+ {
+ EmergencyJumpStubReserve * pList = *ppPrev;
+
+ if (loAddr <= pList->m_ptr &&
+ pList->m_ptr + pList->m_size < hiAddr)
+ {
+ *ppPrev = pList->m_pNext;
+
+ BYTE * pBlock = pList->m_ptr;
+ *pReserveSize = pList->m_size;
+
+ delete pList;
+
+ return pBlock;
+ }
+ }
+
+ return NULL;
+}
+
+VOID EEJitManager::EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SIZE_T reserveSize)
+{
+ CONTRACTL {
+ THROWS;
+ GC_NOTRIGGER;
+ } CONTRACTL_END;
+
+ CrstHolder ch(&m_CodeHeapCritSec);
+
+ BYTE * loAddr = pImageBase + imageSize + INT32_MIN;
+ if (loAddr > pImageBase) loAddr = NULL; // overflow
+
+ BYTE * hiAddr = pImageBase + INT32_MAX;
+ if (hiAddr < pImageBase) hiAddr = (BYTE *)UINT64_MAX; // overflow
+
+ for (EmergencyJumpStubReserve * pList = m_pEmergencyJumpStubReserveList; pList != NULL; pList = pList->m_pNext)
+ {
+ if (loAddr <= pList->m_ptr &&
+ pList->m_ptr + pList->m_size < hiAddr)
+ {
+ SIZE_T used = min(reserveSize, pList->m_free);
+ pList->m_free -= used;
+
+ reserveSize -= used;
+ if (reserveSize == 0)
+ return;
+ }
+ }
+
+ // Try several different strategies - the most efficient one first
+ int allocMode = 0;
+
+ // Try to reserve at least 16MB at a time
+ SIZE_T allocChunk = max(ALIGN_UP(reserveSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY), 16*1024*1024);
+
+ while (reserveSize > 0)
+ {
+ NewHolder<EmergencyJumpStubReserve> pNewReserve(new EmergencyJumpStubReserve());
+
+ for (;;)
+ {
+ BYTE * loAddrCurrent = loAddr;
+ BYTE * hiAddrCurrent = hiAddr;
+
+ switch (allocMode)
+ {
+ case 0:
+ // First, try to allocate towards the center of the allowed range. It is more likely to
+ // satisfy subsequent reservations.
+ loAddrCurrent = loAddr + (hiAddr - loAddr) / 8;
+ hiAddrCurrent = hiAddr - (hiAddr - loAddr) / 8;
+ break;
+ case 1:
+ // Try the whole allowed range
+ break;
+ case 2:
+ // If the large allocation failed, retry with small chunk size
+ allocChunk = VIRTUAL_ALLOC_RESERVE_GRANULARITY;
+ break;
+ default:
+ return; // Unable to allocate the reserve - give up
+ }
+
+ pNewReserve->m_ptr = ClrVirtualAllocWithinRange(loAddrCurrent, hiAddrCurrent,
+ allocChunk, MEM_RESERVE, PAGE_NOACCESS);
+
+ if (pNewReserve->m_ptr != NULL)
+ break;
+
+ // Retry with the next allocation strategy
+ allocMode++;
+ }
+
+ SIZE_T used = min(allocChunk, reserveSize);
+ reserveSize -= used;
+
+ pNewReserve->m_size = allocChunk;
+ pNewReserve->m_free = allocChunk - used;
+
+ // Add it to the list
+ pNewReserve->m_pNext = m_pEmergencyJumpStubReserveList;
+ m_pEmergencyJumpStubReserveList = pNewReserve.Extract();
+ }
+}
+#endif // _TARGET_AMD64_
+
HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap *pJitMetaHeap)
{
CONTRACT(HeapList *) {
@@ -1838,6 +2009,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
BYTE * pBaseAddr = NULL;
DWORD dwSizeAcquiredFromInitialBlock = 0;
+ bool fAllocatedFromEmergencyJumpStubReserve = false;
pBaseAddr = (BYTE *)pInfo->m_pAllocator->GetCodeHeapInitialBlock(loAddr, hiAddr, (DWORD)initialRequestSize, &dwSizeAcquiredFromInitialBlock);
if (pBaseAddr != NULL)
@@ -1850,8 +2022,18 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
{
pBaseAddr = ClrVirtualAllocWithinRange(loAddr, hiAddr,
reserveSize, MEM_RESERVE, PAGE_NOACCESS);
+
if (!pBaseAddr)
+ {
+#ifdef _TARGET_AMD64_
+ pBaseAddr = ExecutionManager::GetEEJitManager()->AllocateFromEmergencyJumpStubReserve(loAddr, hiAddr, &reserveSize);
+ if (!pBaseAddr)
+ ThrowOutOfMemoryWithinRange();
+ fAllocatedFromEmergencyJumpStubReserve = true;
+#else
ThrowOutOfMemoryWithinRange();
+#endif // _TARGET_AMD64_
+ }
}
else
{
@@ -1881,7 +2063,7 @@ HeapList* LoaderCodeHeap::CreateCodeHeap(CodeHeapRequestInfo *pInfo, LoaderHeap
// We do not need to memset this memory, since ClrVirtualAlloc() guarantees that the memory is zero.
// Furthermore, if we avoid writing to it, these pages don't come into our working set
- pHp->bFull = false;
+ pHp->bFull = fAllocatedFromEmergencyJumpStubReserve;
pHp->bFullForJumpStubs = false;
pHp->cBlocks = 0;
@@ -2167,12 +2349,10 @@ void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo,
// allocation won't fail or handle jump stub allocation gracefully (see DevDiv #381823 and
// related bugs for details).
//
- static int codeHeapReserveForJumpStubs = -1;
-
- if (codeHeapReserveForJumpStubs == -1)
- codeHeapReserveForJumpStubs = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_CodeHeapReserveForJumpStubs);
+ static ConfigDWORD configCodeHeapReserveForJumpStubs;
+ int percentReserveForJumpStubs = configCodeHeapReserveForJumpStubs.val(CLRConfig::INTERNAL_CodeHeapReserveForJumpStubs);
- size_t reserveForJumpStubs = codeHeapReserveForJumpStubs * (pCodeHeap->maxCodeHeapSize / 100);
+ size_t reserveForJumpStubs = percentReserveForJumpStubs * (pCodeHeap->maxCodeHeapSize / 100);
size_t minReserveForJumpStubs = sizeof(CodeHeader) +
sizeof(JumpStubBlockHeader) + (size_t) DEFAULT_JUMPSTUBS_PER_BLOCK * BACK_TO_BACK_JUMP_ALLOCATE_SIZE +
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
index 8740b6c78c..8e2c6293dc 100644
--- a/src/vm/codeman.h
+++ b/src/vm/codeman.h
@@ -1114,6 +1114,26 @@ private :
CUnorderedArray<DomainCodeHeapList *, 5> m_DomainCodeHeaps;
CUnorderedArray<DomainCodeHeapList *, 5> m_DynamicDomainCodeHeaps;
+#ifdef _TARGET_AMD64_
+private:
+ //
+ // List of reserved memory blocks to be used for jump stub allocation if no suitable memory block is found
+ // via the regular mechanism
+ //
+ struct EmergencyJumpStubReserve
+ {
+ EmergencyJumpStubReserve * m_pNext;
+ BYTE * m_ptr;
+ SIZE_T m_size;
+ SIZE_T m_free;
+ };
+ EmergencyJumpStubReserve * m_pEmergencyJumpStubReserveList;
+
+public:
+ BYTE * AllocateFromEmergencyJumpStubReserve(const BYTE * loAddr, const BYTE * hiAddr, SIZE_T * pReserveSize);
+ VOID EnsureJumpStubReserve(BYTE * pImageBase, SIZE_T imageSize, SIZE_T reserveSize);
+#endif
+
public:
ICorJitCompiler * m_jit;
HINSTANCE m_JITCompiler;
diff --git a/src/vm/compile.cpp b/src/vm/compile.cpp
index 23242df1db..846aa283ba 100644
--- a/src/vm/compile.cpp
+++ b/src/vm/compile.cpp
@@ -1053,6 +1053,14 @@ HRESULT CEECompileInfo::SetCompilationTarget(CORINFO_ASSEMBLY_HANDLE assembl
}
#endif
+#ifdef FEATURE_READYTORUN_COMPILER
+ if (IsReadyToRunCompilation() && !pModule->IsILOnly())
+ {
+ GetSvcLogger()->Printf(LogLevel_Error, W("Error: /readytorun not supported for mixed mode assemblies\n"));
+ return E_FAIL;
+ }
+#endif
+
return S_OK;
}
@@ -2875,7 +2883,35 @@ class QuickSortILNativeMapByIL : public CQuickSort<ICorDebugInfo::OffsetMapping>
return 1;
}
};
-
+
+// ----------------------------------------------------------------------------
+// Simple class to sort IL to Native mapping arrays by Native offset
+//
+class QuickSortILNativeMapByNativeOffset : public CQuickSort<ICorDebugInfo::OffsetMapping>
+{
+public:
+ QuickSortILNativeMapByNativeOffset(
+ ICorDebugInfo::OffsetMapping * rgMap,
+ int cEntries)
+ : CQuickSort<ICorDebugInfo::OffsetMapping>(rgMap, cEntries)
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ int Compare(ICorDebugInfo::OffsetMapping * pFirst,
+ ICorDebugInfo::OffsetMapping * pSecond)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (pFirst->nativeOffset < pSecond->nativeOffset)
+ return -1;
+ else if (pFirst->nativeOffset == pSecond->nativeOffset)
+ return 0;
+ else
+ return 1;
+ }
+};
+
// ----------------------------------------------------------------------------
// Simple structure used when merging the JIT manager's IL-to-native maps
// (ICorDebugInfo::OffsetMapping) with the IL PDB's source-to-IL map.
@@ -3030,6 +3066,8 @@ public:
}
};
+#define UNKNOWN_SOURCE_FILE_PATH L"unknown"
+
// ----------------------------------------------------------------------------
// Manages generating all PDB data for an EE Module. Directly responsible for writing the
// string table and file checksum subsections. One of these is instantiated per Module
@@ -3122,7 +3160,13 @@ private:
ReleaseHolder<ISymUnmanagedBinder> m_pBinder;
ReleaseHolder<ISymUnmanagedReader> m_pReader;
NewInterfaceArrayHolder<ISymUnmanagedDocument> m_rgpDocs; // All docs in the PDB Mod
- ULONG32 m_cDocs;
+    // Why both m_ilPdbDocCount and m_finalPdbDocCount exist:
+    // NGenMethodLinesPdbWriter::WriteDebugSILLinesSubsection does not write path info, but it
+    // must still be able to find "UNKNOWN_SOURCE_FILE_PATH", which never appears in m_rgpDocs.
+    // So, whether or not an IL PDB is present, we set m_finalPdbDocCount to
+    // m_ilPdbDocCount + 1 and emit the extra path entry as "UNKNOWN_SOURCE_FILE_PATH".
+ ULONG32 m_ilPdbDocCount;
+ ULONG32 m_finalPdbDocCount;
// Keeps track of source file names and how they map to offsets in the relevant PDB
// subsections.
@@ -3148,8 +3192,9 @@ public:
m_dwExtraData(dwExtraData),
m_pBinder(pBinder),
m_pModule(pModule),
- m_wszManagedPDBSearchPath(wszManagedPDBSearchPath)
-
+ m_wszManagedPDBSearchPath(wszManagedPDBSearchPath),
+ m_ilPdbDocCount(0),
+ m_finalPdbDocCount(1)
{
LIMITED_METHOD_CONTRACT;
@@ -3161,7 +3206,7 @@ public:
HRESULT WritePDBData();
- HRESULT WriteMethodPDBData(PEImageLayout * pLoadedLayout, USHORT iCodeSection, BYTE *pCodeBase, MethodDesc * hotDesc, PCODE start);
+ HRESULT WriteMethodPDBData(PEImageLayout * pLoadedLayout, USHORT iCodeSection, BYTE *pCodeBase, MethodDesc * hotDesc, PCODE start, bool isILPDBProvided);
};
// ----------------------------------------------------------------------------
@@ -3182,6 +3227,7 @@ private:
const IJitManager::MethodRegionInfo * m_pMethodRegionInfo;
EECodeInfo * m_pCodeInfo;
DocNameToOffsetMap * m_pDocNameToOffsetMap;
+ bool m_isILPDBProvided;
// IL-to-native map from JIT manager
ULONG32 m_cIlNativeMap;
@@ -3193,16 +3239,36 @@ private:
NewArrayHolder<ULONG32> m_rgnLineStarts; // Array of source lines for this method
ULONG32 m_cSeqPoints; // Count of above two parallel arrays
- HRESULT WriteLinesSubsection(
+ HRESULT WriteNativeILMapPDBData();
+ LPBYTE InitDebugLinesHeaderSection(
+ DEBUG_S_SUBSECTION_TYPE type,
+ ULONG32 ulCodeStartOffset,
+ ULONG32 cbCode,
+ ULONG32 lineSize,
+ CV_DebugSSubsectionHeader_t **ppSubSectHeader /*out*/,
+ CV_DebugSLinesHeader_t ** ppLinesHeader /*out*/,
+ LPBYTE * ppbLinesSubsectionCur /*out*/);
+
+ HRESULT WriteDebugSLinesSubsection(
ULONG32 ulCodeStartOffset,
ULONG32 cbCode,
MapIndexPair * rgMapIndexPairs,
ULONG32 cMapIndexPairs);
+ HRESULT WriteDebugSILLinesSubsection(
+ ULONG32 ulCodeStartOffset,
+ ULONG32 cbCode,
+ ICorDebugInfo::OffsetMapping * rgILNativeMap,
+ ULONG32 rgILNativeMapAdjustSize);
+
BOOL FinalizeLinesFileBlock(
- CV_DebugSLinesFileBlockHeader_t * pLinesFileBlockHeader,
+ CV_DebugSLinesFileBlockHeader_t * pLinesFileBlockHeader,
CV_Line_t * pLineBlockStart,
- CV_Line_t * pLineBlockAfterEnd);
+ CV_Line_t * pLineBlockAfterEnd
+#ifdef _DEBUG
+ , BOOL ignorekUnmappedIPCheck = false
+#endif
+ );
public:
NGenMethodLinesPdbWriter(
@@ -3215,7 +3281,8 @@ public:
TADDR addrCodeSection,
const IJitManager::MethodRegionInfo * pMethodRegionInfo,
EECodeInfo * pCodeInfo,
- DocNameToOffsetMap * pDocNameToOffsetMap)
+ DocNameToOffsetMap * pDocNameToOffsetMap,
+ bool isILPDBProvided)
: m_pWriter(pWriter),
m_pMod(pMod),
m_pReader(pReader),
@@ -3227,7 +3294,8 @@ public:
m_pCodeInfo(pCodeInfo),
m_pDocNameToOffsetMap(pDocNameToOffsetMap),
m_cIlNativeMap(0),
- m_cSeqPoints(0)
+ m_cSeqPoints(0),
+ m_isILPDBProvided(isILPDBProvided)
{
LIMITED_METHOD_CONTRACT;
}
@@ -3277,7 +3345,7 @@ HRESULT NGenModulePdbWriter::WriteStringTable()
UINT64 cbStringTableEstimate =
sizeof(DWORD) +
sizeof(CV_DebugSSubsectionHeader_t) +
- m_cDocs * (MAX_LONGPATH + 1);
+ m_finalPdbDocCount * (MAX_LONGPATH + 1);
if (!FitsIn<ULONG32>(cbStringTableEstimate))
{
return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
@@ -3302,14 +3370,21 @@ HRESULT NGenModulePdbWriter::WriteStringTable()
LPBYTE pbStringTableStart = pbStringTableSubsectionCur;
// The actual strings
- for (ULONG32 i=0; i < m_cDocs; i++)
- {
- WCHAR wszURL[MAX_LONGPATH];
+ for (ULONG32 i = 0; i < m_finalPdbDocCount; i++)
+ {
+        // NGenMethodLinesPdbWriter::WriteDebugSILLinesSubsection does not write path info,
+        // but it must still be able to find "UNKNOWN_SOURCE_FILE_PATH", which never appears
+        // in m_rgpDocs. So, whether or not an IL PDB is present, we set m_finalPdbDocCount
+        // to m_ilPdbDocCount + 1 and emit the extra path as "UNKNOWN_SOURCE_FILE_PATH".
+        // That is why m_finalPdbDocCount and m_ilPdbDocCount can differ.
+ WCHAR wszURL[MAX_LONGPATH] = UNKNOWN_SOURCE_FILE_PATH;
ULONG32 cchURL;
- hr = m_rgpDocs[i]->GetURL(_countof(wszURL), &cchURL, wszURL);
- if (FAILED(hr))
- return hr;
-
+ if (i < m_ilPdbDocCount)
+ {
+ hr = m_rgpDocs[i]->GetURL(_countof(wszURL), &cchURL, wszURL);
+ if (FAILED(hr))
+ return hr;
+ }
int cbWritten = WideCharToMultiByte(
CP_UTF8,
0, // dwFlags
@@ -3417,12 +3492,13 @@ HRESULT NGenModulePdbWriter::InitILPdbData()
m_rgpDocs = new ISymUnmanagedDocument * [cDocs];
hr = m_pReader->GetDocuments(
cDocs,
- &m_cDocs,
+ &m_ilPdbDocCount,
m_rgpDocs);
if (FAILED(hr))
return hr;
+ m_finalPdbDocCount = m_ilPdbDocCount + 1;
// Commit m_rgpDocs to calling Release() on each ISymUnmanagedDocument* in the array
- m_rgpDocs.SetElementCount(m_cDocs);
+ m_rgpDocs.SetElementCount(m_ilPdbDocCount);
return S_OK;
}
@@ -3445,11 +3521,13 @@ HRESULT NGenModulePdbWriter::WritePDBData()
// This will try to open the managed PDB if lines info was requested. This is a
// likely failure point, so intentionally do this before creating the NGEN PDB file
// on disk.
+ bool isILPDBProvided = false;
if ((m_dwExtraData & kPDBLines) != 0)
{
hr = InitILPdbData();
if (FAILED(hr))
return hr;
+ isILPDBProvided = true;
}
// Create the PDB file we will write into.
@@ -3487,20 +3565,19 @@ HRESULT NGenModulePdbWriter::WritePDBData()
m_deletePDBFileHolder.Assign(m_wszPDBFilePath);
}
- if ((m_dwExtraData & kPDBLines) != 0)
- {
- hr = m_pdbMod.Open(m_pWriter, pLoadedLayout->GetPath(), m_pModule->GetPath());
- if (FAILED(hr))
- return hr;
- hr = WriteStringTable();
- if (FAILED(hr))
- return hr;
+ hr = m_pdbMod.Open(m_pWriter, pLoadedLayout->GetPath(), m_pModule->GetPath());
+ if (FAILED(hr))
+ return hr;
- hr = WriteFileChecksums();
- if (FAILED(hr))
- return hr;
- }
+ hr = WriteStringTable();
+ if (FAILED(hr))
+ return hr;
+
+ hr = WriteFileChecksums();
+ if (FAILED(hr))
+ return hr;
+
COUNT_T sectionCount = pLoadedLayout->GetNumberOfSections();
IMAGE_SECTION_HEADER *section = pLoadedLayout->FindFirstSection();
@@ -3522,23 +3599,20 @@ HRESULT NGenModulePdbWriter::WritePDBData()
pCodeBase = (BYTE *)section[sectionIndex].VirtualAddress;
}
- if ((m_dwExtraData & kPDBLines) != 0)
- {
- // In order to support the DIA RVA-to-lines API against the PDB we're
- // generating, we need to update the section contribution table with each
- // section we add.
- hr = m_pWriter->ModAddSecContribEx(
- m_pdbMod.GetModPtr(),
- (USHORT)(sectionIndex + 1),
- 0,
- section[sectionIndex].SizeOfRawData,
- section[sectionIndex].Characteristics,
- 0, // dwDataCrc
- 0 // dwRelocCrc
- );
- if (FAILED(hr))
- return hr;
- }
+ // In order to support the DIA RVA-to-lines API against the PDB we're
+ // generating, we need to update the section contribution table with each
+ // section we add.
+ hr = m_pWriter->ModAddSecContribEx(
+ m_pdbMod.GetModPtr(),
+ (USHORT)(sectionIndex + 1),
+ 0,
+ section[sectionIndex].SizeOfRawData,
+ section[sectionIndex].Characteristics,
+ 0, // dwDataCrc
+ 0 // dwRelocCrc
+ );
+ if (FAILED(hr))
+ return hr;
sectionIndex++;
}
@@ -3546,17 +3620,16 @@ HRESULT NGenModulePdbWriter::WritePDBData()
_ASSERTE(iCodeSection != 0);
_ASSERTE(pCodeBase != NULL);
- if ((m_dwExtraData & kPDBLines) != 0)
- {
- // To support lines info, we need a "dummy" section, indexed as 0, for use as a
- // sentinel when MSPDB sets up its section contribution table
- hr = m_pWriter->AddSection(0, // Dummy section 0
- OMF_SentinelType,
- 0,
- 0xFFFFffff);
- if (FAILED(hr))
- return hr;
- }
+
+ // To support lines info, we need a "dummy" section, indexed as 0, for use as a
+ // sentinel when MSPDB sets up its section contribution table
+ hr = m_pWriter->AddSection(0, // Dummy section 0
+ OMF_SentinelType,
+ 0,
+ 0xFFFFffff);
+ if (FAILED(hr))
+ return hr;
+
#ifdef FEATURE_READYTORUN_COMPILER
if (pLoadedLayout->HasReadyToRunHeader())
@@ -3566,7 +3639,7 @@ HRESULT NGenModulePdbWriter::WritePDBData()
{
MethodDesc *hotDesc = mi.GetMethodDesc();
- hr = WriteMethodPDBData(pLoadedLayout, iCodeSection, pCodeBase, hotDesc, mi.GetMethodStartAddress());
+ hr = WriteMethodPDBData(pLoadedLayout, iCodeSection, pCodeBase, hotDesc, mi.GetMethodStartAddress(), isILPDBProvided);
if (FAILED(hr))
return hr;
}
@@ -3580,7 +3653,7 @@ HRESULT NGenModulePdbWriter::WritePDBData()
MethodDesc *hotDesc = mi.GetMethodDesc();
hotDesc->CheckRestore();
- hr = WriteMethodPDBData(pLoadedLayout, iCodeSection, pCodeBase, hotDesc, mi.GetMethodStartAddress());
+ hr = WriteMethodPDBData(pLoadedLayout, iCodeSection, pCodeBase, hotDesc, mi.GetMethodStartAddress(), isILPDBProvided);
if (FAILED(hr))
return hr;
}
@@ -3591,7 +3664,7 @@ HRESULT NGenModulePdbWriter::WritePDBData()
return S_OK;
}
-HRESULT NGenModulePdbWriter::WriteMethodPDBData(PEImageLayout * pLoadedLayout, USHORT iCodeSection, BYTE *pCodeBase, MethodDesc * hotDesc, PCODE start)
+HRESULT NGenModulePdbWriter::WriteMethodPDBData(PEImageLayout * pLoadedLayout, USHORT iCodeSection, BYTE *pCodeBase, MethodDesc * hotDesc, PCODE start, bool isILPDBProvided)
{
STANDARD_VM_CONTRACT;
@@ -3607,6 +3680,12 @@ HRESULT NGenModulePdbWriter::WriteMethodPDBData(PEImageLayout * pLoadedLayout, U
_ASSERTE(pHotCodeStart);
PCODE pColdCodeStart = methodRegionInfo.coldStartAddress;
+ SString mAssemblyName;
+ mAssemblyName.SetUTF8(m_pModule->GetAssembly()->GetSimpleName());
+ SString assemblyName;
+ assemblyName.SetUTF8(hotDesc->GetAssembly()->GetSimpleName());
+ SString methodToken;
+ methodToken.Printf("%X", hotDesc->GetMemberDef());
// Hot name
{
@@ -3615,7 +3694,11 @@ HRESULT NGenModulePdbWriter::WriteMethodPDBData(PEImageLayout * pLoadedLayout, U
fullName,
hotDesc,
TypeString::FormatNamespace | TypeString::FormatSignature);
-
+ fullName.Append(L"$#");
+ if (!mAssemblyName.Equals(assemblyName))
+ fullName.Append(assemblyName);
+ fullName.Append(L"#");
+ fullName.Append(methodToken);
BSTRHolder hotNameHolder(SysAllocString(fullName.GetUnicode()));
hr = m_pWriter->AddSymbol(hotNameHolder,
iCodeSection,
@@ -3634,7 +3717,12 @@ HRESULT NGenModulePdbWriter::WriteMethodPDBData(PEImageLayout * pLoadedLayout, U
fullNameCold,
hotDesc,
TypeString::FormatNamespace | TypeString::FormatSignature);
-
+ fullNameCold.Append(L"$#");
+ if (!mAssemblyName.Equals(assemblyName))
+ fullNameCold.Append(assemblyName);
+ fullNameCold.Append(L"#");
+ fullNameCold.Append(methodToken);
+
BSTRHolder coldNameHolder(SysAllocString(fullNameCold.GetUnicode()));
hr = m_pWriter->AddSymbol(coldNameHolder,
iCodeSection,
@@ -3647,10 +3735,8 @@ HRESULT NGenModulePdbWriter::WriteMethodPDBData(PEImageLayout * pLoadedLayout, U
}
// Offset / lines mapping
- if (((m_dwExtraData & kPDBLines) != 0) &&
-
- // Skip functions that are too big for PDB lines format
- FitsIn<DWORD>(methodRegionInfo.hotSize) &&
+ // Skip functions that are too big for PDB lines format
+ if (FitsIn<DWORD>(methodRegionInfo.hotSize) &&
FitsIn<DWORD>(methodRegionInfo.coldSize))
{
NGenMethodLinesPdbWriter methodLinesWriter(
@@ -3663,7 +3749,8 @@ HRESULT NGenModulePdbWriter::WriteMethodPDBData(PEImageLayout * pLoadedLayout, U
(TADDR)pLoadedLayout->GetBase() + (TADDR)pCodeBase,
&methodRegionInfo,
&codeInfo,
- &m_docNameToOffsetMap);
+ &m_docNameToOffsetMap,
+ isILPDBProvided);
hr = methodLinesWriter.WritePDBData();
if (FAILED(hr))
@@ -3700,7 +3787,7 @@ HRESULT NGenModulePdbWriter::WriteFileChecksums()
UINT64 cbChecksumSubsectionEstimate =
sizeof(DWORD) +
sizeof(CV_DebugSSubsectionHeader_t) +
- m_cDocs * kcbEachChecksumEstimate;
+ m_finalPdbDocCount * kcbEachChecksumEstimate;
if (!FitsIn<ULONG32>(cbChecksumSubsectionEstimate))
{
return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
@@ -3724,14 +3811,25 @@ HRESULT NGenModulePdbWriter::WriteFileChecksums()
// (3) Iterate through source files, steal their checksum info from the IL PDB, and
// write it into the NGEN PDB.
- for (ULONG32 i=0; i < m_cDocs; i++)
+ for (ULONG32 i = 0; i < m_finalPdbDocCount; i++)
{
- WCHAR wszURL[MAX_LONGPATH];
+ WCHAR wszURL[MAX_LONGPATH] = UNKNOWN_SOURCE_FILE_PATH;
char szURL[MAX_LONGPATH];
ULONG32 cchURL;
- hr = m_rgpDocs[i]->GetURL(_countof(wszURL), &cchURL, wszURL);
- if (FAILED(hr))
- return hr;
+
+
+ bool isKnownSourcePath = i < m_ilPdbDocCount;
+ if (isKnownSourcePath)
+ {
+            // NGenMethodLinesPdbWriter::WriteDebugSILLinesSubsection does not write path info,
+            // but it must still be able to find "UNKNOWN_SOURCE_FILE_PATH", which never appears
+            // in m_rgpDocs. So, whether or not an IL PDB is present, we set m_finalPdbDocCount
+            // to m_ilPdbDocCount + 1 and emit the extra path as "UNKNOWN_SOURCE_FILE_PATH".
+            // That is why m_finalPdbDocCount and m_ilPdbDocCount can differ.
+ hr = m_rgpDocs[i]->GetURL(_countof(wszURL), &cchURL, wszURL);
+ if (FAILED(hr))
+ return hr;
+ }
int cbWritten = WideCharToMultiByte(
CP_UTF8,
@@ -3773,16 +3871,19 @@ HRESULT NGenModulePdbWriter::WriteFileChecksums()
BYTE rgbChecksum[kcbEachChecksumEstimate];
ULONG32 cbChecksum = 0;
BYTE bChecksumAlgorithmType = CHKSUM_TYPE_NONE;
- GUID guidChecksumAlgorithm;
- hr = m_rgpDocs[i]->GetCheckSumAlgorithmId(&guidChecksumAlgorithm);
- if (SUCCEEDED(hr))
+ if (isKnownSourcePath)
{
- // If we got the checksum algorithm, we can write it all out to the buffer.
- // Else, we'll just omit the checksum info
- if (memcmp(&guidChecksumAlgorithm, &CorSym_SourceHash_MD5, sizeof(GUID)) == 0)
- bChecksumAlgorithmType = CHKSUM_TYPE_MD5;
- else if (memcmp(&guidChecksumAlgorithm, &CorSym_SourceHash_SHA1, sizeof(GUID)) == 0)
- bChecksumAlgorithmType = CHKSUM_TYPE_SHA1;
+ GUID guidChecksumAlgorithm;
+ hr = m_rgpDocs[i]->GetCheckSumAlgorithmId(&guidChecksumAlgorithm);
+ if (SUCCEEDED(hr))
+ {
+ // If we got the checksum algorithm, we can write it all out to the buffer.
+ // Else, we'll just omit the checksum info
+ if (memcmp(&guidChecksumAlgorithm, &CorSym_SourceHash_MD5, sizeof(GUID)) == 0)
+ bChecksumAlgorithmType = CHKSUM_TYPE_MD5;
+ else if (memcmp(&guidChecksumAlgorithm, &CorSym_SourceHash_SHA1, sizeof(GUID)) == 0)
+ bChecksumAlgorithmType = CHKSUM_TYPE_SHA1;
+ }
}
if (bChecksumAlgorithmType != CHKSUM_TYPE_NONE)
@@ -3868,6 +3969,16 @@ HRESULT NGenMethodLinesPdbWriter::WritePDBData()
// Shouldn't happen, but just skip this method if it does
return S_OK;
}
+ HRESULT hr;
+ if (FAILED(hr = WriteNativeILMapPDBData()))
+ {
+ return hr;
+ }
+
+ if (!m_isILPDBProvided)
+ {
+ return S_OK;
+ }
// We will traverse this IL-to-native map (from the JIT) in parallel with the
// source-to-IL map provided by the IL PDB (below). Both need to be sorted by IL so
@@ -3879,7 +3990,7 @@ HRESULT NGenMethodLinesPdbWriter::WritePDBData()
// according to the IL PDB API)
ReleaseHolder<ISymUnmanagedMethod> pMethod;
- HRESULT hr = m_pReader->GetMethod(
+ hr = m_pReader->GetMethod(
m_hotDesc->GetMemberDef(),
&pMethod);
if (FAILED(hr))
@@ -3944,7 +4055,7 @@ HRESULT NGenMethodLinesPdbWriter::WritePDBData()
ULONG32 iIlNativeMap = 0;
ULONG32 iMapIndexPairs = 0;
-
+
// Traverse IL PDB entries and IL-to-native map entries (both sorted by IL) in
// parallel
//
@@ -4008,7 +4119,16 @@ HRESULT NGenMethodLinesPdbWriter::WritePDBData()
// Reset our memory of the last unmatched entry in the IL PDB
iSeqPointLastUnmatched = (ULONG32) -1;
}
-
+ else if (iMapIndexPairs > 0)
+ {
+ DWORD lastMatchedilNativeIndex = rgMapIndexPairs[iMapIndexPairs - 1].m_iIlNativeMap;
+ if (m_rgIlNativeMap[iIlNativeMap].ilOffset == m_rgIlNativeMap[lastMatchedilNativeIndex].ilOffset &&
+ m_rgIlNativeMap[iIlNativeMap].nativeOffset < m_rgIlNativeMap[lastMatchedilNativeIndex].nativeOffset)
+ {
+ rgMapIndexPairs[iMapIndexPairs - 1].m_iIlNativeMap = iIlNativeMap;
+ }
+
+ }
// Go to next ilnative map entry
iIlNativeMap++;
continue;
@@ -4081,7 +4201,7 @@ HRESULT NGenMethodLinesPdbWriter::WritePDBData()
}
// Write out the hot region into its own lines-file subsection
- hr = WriteLinesSubsection(
+ hr = WriteDebugSLinesSubsection(
ULONG32(m_pMethodRegionInfo->hotStartAddress - m_addrCodeSection),
ULONG32(m_pMethodRegionInfo->hotSize),
rgMapIndexPairs,
@@ -4093,7 +4213,7 @@ HRESULT NGenMethodLinesPdbWriter::WritePDBData()
// region
if (iMapIndexPairsFirstEntryInColdSection < cMapIndexPairs)
{
- hr = WriteLinesSubsection(
+ hr = WriteDebugSLinesSubsection(
ULONG32(m_pMethodRegionInfo->coldStartAddress - m_addrCodeSection),
ULONG32(m_pMethodRegionInfo->coldSize),
&rgMapIndexPairs[iMapIndexPairsFirstEntryInColdSection],
@@ -4107,6 +4227,145 @@ HRESULT NGenMethodLinesPdbWriter::WritePDBData()
//---------------------------------------------------------------------------------------
//
+// Manages the writing of all native-IL subsections required for a given method. Does almost
+// the same thing as NGenMethodLinesPdbWriter::WritePDBData, but writes the native-IL
+// map this time.
+//
+
+HRESULT NGenMethodLinesPdbWriter::WriteNativeILMapPDBData()
+{
+ STANDARD_VM_CONTRACT;
+
+ HRESULT hr;
+
+ QuickSortILNativeMapByNativeOffset sorterByNativeOffset(m_rgIlNativeMap, m_cIlNativeMap);
+ sorterByNativeOffset.Sort();
+
+ ULONG32 iIlNativeMap = 0;
+ ULONG32 ilNativeMapFirstEntryInColdeSection = m_cIlNativeMap;
+ for (iIlNativeMap = 0; iIlNativeMap < m_cIlNativeMap; iIlNativeMap++)
+ {
+ if (m_rgIlNativeMap[iIlNativeMap].nativeOffset >= m_pMethodRegionInfo->hotSize)
+ {
+ ilNativeMapFirstEntryInColdeSection = iIlNativeMap;
+ break;
+ }
+ }
+
+ NewArrayHolder<ICorDebugInfo::OffsetMapping> coldRgIlNativeMap(new ICorDebugInfo::OffsetMapping[m_cIlNativeMap - ilNativeMapFirstEntryInColdeSection]);
+ // Adjust the cold offsets (if any) to be relative to the cold start
+ for (iIlNativeMap = ilNativeMapFirstEntryInColdeSection; iIlNativeMap < m_cIlNativeMap; iIlNativeMap++)
+ {
+ DWORD dwNativeOffset = m_rgIlNativeMap[iIlNativeMap].nativeOffset;
+ _ASSERTE(dwNativeOffset >= m_pMethodRegionInfo->hotSize);
+
+ // Adjust offset so it's relative to the cold region start
+ dwNativeOffset -= DWORD(m_pMethodRegionInfo->hotSize);
+ _ASSERTE(dwNativeOffset < m_pMethodRegionInfo->coldSize);
+ coldRgIlNativeMap[iIlNativeMap - ilNativeMapFirstEntryInColdeSection].ilOffset = m_rgIlNativeMap[iIlNativeMap].ilOffset;
+ coldRgIlNativeMap[iIlNativeMap - ilNativeMapFirstEntryInColdeSection].nativeOffset = dwNativeOffset;
+ coldRgIlNativeMap[iIlNativeMap - ilNativeMapFirstEntryInColdeSection].source = m_rgIlNativeMap[iIlNativeMap].source;
+ }
+
+ // Write out the hot region into its own lines-file subsection
+ hr = WriteDebugSILLinesSubsection(
+ ULONG32(m_pMethodRegionInfo->hotStartAddress - m_addrCodeSection),
+ ULONG32(m_pMethodRegionInfo->hotSize),
+ m_rgIlNativeMap,
+ ilNativeMapFirstEntryInColdeSection);
+ if (FAILED(hr))
+ return hr;
+
+ // If there was a hot/cold split, write a separate lines-file subsection for the cold
+ // region
+ if (ilNativeMapFirstEntryInColdeSection < m_cIlNativeMap)
+ {
+ hr = WriteDebugSILLinesSubsection(
+ ULONG32(m_pMethodRegionInfo->coldStartAddress - m_addrCodeSection),
+ ULONG32(m_pMethodRegionInfo->coldSize),
+ coldRgIlNativeMap,
+ m_cIlNativeMap - ilNativeMapFirstEntryInColdeSection);
+ if (FAILED(hr))
+ return hr;
+ }
+
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Helper called by NGenMethodLinesPdbWriter::WriteDebugSLinesSubsection and
+// NGenMethodLinesPdbWriter::WriteDebugSILLinesSubsection to initialize the DEBUG_S*_LINE
+// subsection headers.
+//
+// Arguments:
+// * ulCodeStartOffset - Offset relative to the code section, or where this region
+// of code begins
+// * type - the subsection's type
+// * lineSize - how many lines mapping the subsection will have.
+// * cbCode - Size in bytes of this region of code
+// * ppSubSectHeader - output value which returns the initialized CV_DebugSSubsectionHeader_t struct pointer.
+// * ppLinesHeader - output value which returns the initialized CV_DebugSLinesHeader_t struct pointer.
+// * ppbLinesSubsectionCur - output value which points to the address right after the DebugSLinesHeader
+//
+// Return Value:
+// * Pointer to the starting address of the subsection.
+//
+
+LPBYTE NGenMethodLinesPdbWriter::InitDebugLinesHeaderSection(
+ DEBUG_S_SUBSECTION_TYPE type,
+ ULONG32 ulCodeStartOffset,
+ ULONG32 cbCode,
+ ULONG32 lineSize,
+ CV_DebugSSubsectionHeader_t **ppSubSectHeader /*out*/,
+ CV_DebugSLinesHeader_t ** ppLinesHeader /*out*/,
+ LPBYTE * ppbLinesSubsectionCur /*out*/)
+{
+ STANDARD_VM_CONTRACT;
+
+ UINT64 cbLinesSubsectionEstimate =
+ sizeof(DWORD) +
+ sizeof(CV_DebugSSubsectionHeader_t) +
+ sizeof(CV_DebugSLinesHeader_t) +
+ // Worst case: assume each sequence point will require its own
+ // CV_DebugSLinesFileBlockHeader_t
+ (lineSize * (sizeof(CV_DebugSLinesFileBlockHeader_t) + sizeof(CV_Line_t)));
+ if (!FitsIn<ULONG32>(cbLinesSubsectionEstimate))
+ {
+ return NULL;
+ }
+
+ LPBYTE rgbLinesSubsection = new BYTE[ULONG32(cbLinesSubsectionEstimate)];
+ LPBYTE pbLinesSubsectionCur = rgbLinesSubsection;
+
+ // * (1) DWORD = CV_SIGNATURE_C13 -- the usual subsection signature DWORD
+ *((DWORD *)pbLinesSubsectionCur) = CV_SIGNATURE_C13;
+ pbLinesSubsectionCur += sizeof(DWORD);
+
+ // * (2) CV_DebugSSubsectionHeader_t
+ CV_DebugSSubsectionHeader_t * pSubSectHeader = (CV_DebugSSubsectionHeader_t *)pbLinesSubsectionCur;
+ memset(pSubSectHeader, 0, sizeof(*pSubSectHeader));
+ pSubSectHeader->type = type;
+ *ppSubSectHeader = pSubSectHeader;
+ // pSubSectHeader->cblen to be filled in later once we know the size
+ pbLinesSubsectionCur += sizeof(*pSubSectHeader);
+
+ // * (3) CV_DebugSLinesHeader_t
+ CV_DebugSLinesHeader_t * pLinesHeader = (CV_DebugSLinesHeader_t *)pbLinesSubsectionCur;
+ memset(pLinesHeader, 0, sizeof(*pLinesHeader));
+ pLinesHeader->offCon = ulCodeStartOffset;
+ pLinesHeader->segCon = m_iCodeSection;
+ pLinesHeader->flags = 0; // 0 means line info, but not column info, is included
+ pLinesHeader->cbCon = cbCode;
+ *ppLinesHeader = pLinesHeader;
+ pbLinesSubsectionCur += sizeof(*pLinesHeader);
+ *ppbLinesSubsectionCur = pbLinesSubsectionCur;
+ return rgbLinesSubsection;
+}
+
+//---------------------------------------------------------------------------------------
+//
// Helper called by NGenMethodLinesPdbWriter::WritePDBData to do the actual PDB writing of a single
// lines-subsection. This is called once for the hot region, and once for the cold
// region, of a given method that has been split. That means you get two
@@ -4126,7 +4385,7 @@ HRESULT NGenMethodLinesPdbWriter::WritePDBData()
// m_rgIlNativeMap[rgMapIndexPairs[i].m_iIlNativeMap].nativeOffset increases with i.
//
-HRESULT NGenMethodLinesPdbWriter::WriteLinesSubsection(
+HRESULT NGenMethodLinesPdbWriter::WriteDebugSLinesSubsection(
ULONG32 ulCodeStartOffset,
ULONG32 cbCode,
MapIndexPair * rgMapIndexPairs,
@@ -4153,40 +4412,31 @@ HRESULT NGenMethodLinesPdbWriter::WriteLinesSubsection(
HRESULT hr;
- UINT64 cbLinesSubsectionEstimate =
- sizeof(DWORD) +
- sizeof(CV_DebugSSubsectionHeader_t) +
- sizeof(CV_DebugSLinesHeader_t) +
- // Worst case: assume each sequence point will require its own
- // CV_DebugSLinesFileBlockHeader_t
- (cMapIndexPairs * (sizeof(CV_DebugSLinesFileBlockHeader_t) + sizeof(CV_Line_t)));
- if (!FitsIn<ULONG32>(cbLinesSubsectionEstimate))
+
+ CV_DebugSSubsectionHeader_t * pSubSectHeader = NULL;
+ CV_DebugSLinesHeader_t * pLinesHeader = NULL;
+ CV_DebugSLinesFileBlockHeader_t * LinesFileBlockHeader = NULL;
+
+ // the InitDebugLinesHeaderSection will help us take care of
+ // * (1) DWORD = CV_SIGNATURE_C13
+ // * (2) CV_DebugSSubsectionHeader_t
+ // * (3) CV_DebugSLinesHeader_t
+ LPBYTE pbLinesSubsectionCur;
+ LPBYTE prgbLinesSubsection = InitDebugLinesHeaderSection(
+ DEBUG_S_LINES,
+ ulCodeStartOffset,
+ cbCode,
+ cMapIndexPairs,
+ &pSubSectHeader,
+ &pLinesHeader,
+ &pbLinesSubsectionCur);
+
+ if (pbLinesSubsectionCur == NULL)
{
return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
}
-
- NewArrayHolder<BYTE> rgbLinesSubsection(new BYTE[ULONG32(cbLinesSubsectionEstimate)]);
- LPBYTE pbLinesSubsectionCur = rgbLinesSubsection;
- // * (1) DWORD = CV_SIGNATURE_C13 -- the usual subsection signature DWORD
- *((DWORD *) pbLinesSubsectionCur) = CV_SIGNATURE_C13;
- pbLinesSubsectionCur += sizeof(DWORD);
-
- // * (2) CV_DebugSSubsectionHeader_t
- CV_DebugSSubsectionHeader_t * pSubSectHeader = (CV_DebugSSubsectionHeader_t *) pbLinesSubsectionCur;
- memset(pSubSectHeader, 0, sizeof(*pSubSectHeader));
- pSubSectHeader->type = DEBUG_S_LINES;
- // pSubSectHeader->cblen to be filled in later once we know the size
- pbLinesSubsectionCur += sizeof(*pSubSectHeader);
-
- // * (3) CV_DebugSLinesHeader_t
- CV_DebugSLinesHeader_t * pLinesHeader = (CV_DebugSLinesHeader_t *) pbLinesSubsectionCur;
- memset(pLinesHeader, 0, sizeof(*pLinesHeader));
- pLinesHeader->offCon = ulCodeStartOffset;
- pLinesHeader->segCon = m_iCodeSection;
- pLinesHeader->flags = 0; // 0 means line info, but not column info, is included
- pLinesHeader->cbCon = cbCode;
- pbLinesSubsectionCur += sizeof(*pLinesHeader);
+ NewArrayHolder<BYTE> rgbLinesSubsection(prgbLinesSubsection);
// The loop below takes care of
// * (4) CV_DebugSLinesFileBlockHeader_t
@@ -4195,10 +4445,12 @@ HRESULT NGenMethodLinesPdbWriter::WriteLinesSubsection(
BOOL fAtLeastOneBlockWritten = FALSE;
CV_DebugSLinesFileBlockHeader_t * pLinesFileBlockHeader = NULL;
CV_Line_t * pLineCur = NULL;
+ CV_Line_t * pLinePrev = NULL;
CV_Line_t * pLineBlockStart = NULL;
BOOL fBeginNewBlock = TRUE;
ULONG32 iSeqPointsPrev = (ULONG32) -1;
DWORD dwNativeOffsetPrev = (DWORD) -1;
+ DWORD ilOffsetPrev = (DWORD) -1;
WCHAR wszURLPrev[MAX_LONGPATH];
memset(&wszURLPrev, 0, sizeof(wszURLPrev));
LPBYTE pbEnd = NULL;
@@ -4212,13 +4464,24 @@ HRESULT NGenMethodLinesPdbWriter::WriteLinesSubsection(
// offset mapping. PDB format frowns on that. Since rgMapIndexPairs is being
// iterated in native offset order, it's easy to find these dupes right now, and
// skip all but the first map containing a given IP offset.
- if (m_rgIlNativeMap[iIlNativeMap].nativeOffset == dwNativeOffsetPrev)
- {
- // Found a native offset dupe. Since we've already assigned
- // dwNativeOffsetPrev, ignore the current map entry
- continue;
+ if (pLinePrev != NULL && m_rgIlNativeMap[iIlNativeMap].nativeOffset == pLinePrev->offset)
+ {
+ if (ilOffsetPrev == kUnmappedIP)
+ {
+ // if the previous IL offset is kUnmappedIP, then we should rewrite it.
+ pLineCur = pLinePrev;
+ }
+ else if (iSeqPoints != kUnmappedIP &&
+ m_rgilOffsets[iSeqPoints] < ilOffsetPrev)
+ {
+ pLineCur = pLinePrev;
+ }
+ else
+ {
+ // Found a native offset dupe, ignore the current map entry
+ continue;
+ }
}
- dwNativeOffsetPrev = m_rgIlNativeMap[iIlNativeMap].nativeOffset;
if ((iSeqPoints != kUnmappedIP) && (iSeqPoints != iSeqPointsPrev))
{
@@ -4334,6 +4597,8 @@ HRESULT NGenMethodLinesPdbWriter::WriteLinesSubsection(
m_rgnLineStarts[iSeqPoints];
pLineCur->deltaLineEnd = 0;
pLineCur->fStatement = 1;
+ ilOffsetPrev = (iSeqPoints == kUnmappedIP) ? kUnmappedIP : m_rgilOffsets[iSeqPoints];
+ pLinePrev = pLineCur;
pLineCur++;
} // for (ULONG32 iMapIndexPairs=0; iMapIndexPairs < cMapIndexPairs; iMapIndexPairs++)
@@ -4371,6 +4636,183 @@ HRESULT NGenMethodLinesPdbWriter::WriteLinesSubsection(
return S_OK;
}
+//---------------------------------------------------------------------------------------
+//
+// Helper called by NGenMethodLinesPdbWriter::WriteNativeILMapPDBData to do the actual PDB writing of a single
+// lines-subsection. This is called once for the hot region, and once for the cold
+// region, of a given method that has been split. That means you get two
+// lines-subsections for split methods.
+//
+// Arguments:
+// * ulCodeStartOffset - Offset relative to the code section, or where this region
+// of code begins
+// * cbCode - Size in bytes of this region of code
+// * rgIlNativeMap - IL to Native map array.
+// * rgILNativeMapAdjustSize - the number of elements we need to read in rgILNativeMap.
+//
+
+HRESULT NGenMethodLinesPdbWriter::WriteDebugSILLinesSubsection(
+ ULONG32 ulCodeStartOffset,
+ ULONG32 cbCode,
+ ICorDebugInfo::OffsetMapping * rgIlNativeMap,
+ ULONG32 rgILNativeMapAdjustSize)
+{
+ STANDARD_VM_CONTRACT;
+
+ // The lines subsection of the PDB (i.e., "DEBUG_S_IL_LINES"), is a blob consisting of a
+ // few structs stacked one after the other:
+ //
+ // * (1) DWORD = CV_SIGNATURE_C13 -- the usual subsection signature DWORD
+ // * (2) CV_DebugSSubsectionHeader_t -- the usual subsection header, with type =
+ // DEBUG_S_IL_LINES
+ // * (3) CV_DebugSLinesHeader_t -- a single header for the entire subsection. Its
+ // purpose is to specify the native function being described, and to specify the
+ // size of the variable-sized "blocks" that follow
+ // * (4) CV_DebugSLinesFileBlockHeader_t -- For each block, you get one of these. A
+ // block is defined by a set of sequence points that map to the same source
+ // file. While iterating through the offsets, we need to define new blocks
+ // whenever the source file changes. In C#, this typically only happens when
+ // you advance to (or away from) an unmapped IP (0xFeeFee).
+ // * (5) CV_Line_t (Line array entries) -- For each block, you get several line
+ // array entries, one entry for the beginning of each sequence point.
+
+ HRESULT hr;
+
+ CV_DebugSSubsectionHeader_t * pSubSectHeader = NULL;
+ CV_DebugSLinesHeader_t * pLinesHeader = NULL;
+ CV_DebugSLinesFileBlockHeader_t * pLinesFileBlockHeader = NULL;
+
+ // the InitDebugLinesHeaderSection will help us take care of
+ // * (1) DWORD = CV_SIGNATURE_C13
+ // * (2) CV_DebugSSubsectionHeader_t
+ // * (3) CV_DebugSLinesHeader_t
+ LPBYTE pbLinesSubsectionCur;
+ LPBYTE prgbLinesSubsection = InitDebugLinesHeaderSection(
+ DEBUG_S_IL_LINES,
+ ulCodeStartOffset,
+ cbCode,
+ rgILNativeMapAdjustSize,
+ &pSubSectHeader,
+ &pLinesHeader,
+ &pbLinesSubsectionCur);
+
+ if (prgbLinesSubsection == NULL)
+ {
+ return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+
+ NewArrayHolder<BYTE> rgbLinesSubsection(prgbLinesSubsection);
+
+ // The loop below takes care of
+ // * (4) CV_DebugSLinesFileBlockHeader_t
+ // * (5) CV_Line_t (Line array entries)
+ //
+ CV_Line_t * pLineCur = NULL;
+ CV_Line_t * pLineBlockStart = NULL;
+ BOOL fBeginNewBlock = TRUE;
+ LPBYTE pbEnd = NULL;
+
+ pLinesFileBlockHeader = (CV_DebugSLinesFileBlockHeader_t *)pbLinesSubsectionCur;
+ // PDB structure sizes guarantee this is the case, though their docs are
+ // explicit that each lines-file block header must be 4-byte aligned.
+ _ASSERTE(IS_ALIGNED(pLinesFileBlockHeader, 4));
+
+ memset(pLinesFileBlockHeader, 0, sizeof(*pLinesFileBlockHeader));
+ char szURL[MAX_PATH];
+ int cbWritten = WideCharToMultiByte(
+ CP_UTF8,
+ 0, // dwFlags
+ UNKNOWN_SOURCE_FILE_PATH,
+ -1, // i.e., input is NULL-terminated
+ szURL, // output: UTF8 string starts here
+ _countof(szURL), // Available space
+ NULL, // lpDefaultChar
+ NULL // lpUsedDefaultChar
+ );
+ _ASSERTE(cbWritten > 0);
+ DocNameOffsets docNameOffsets;
+ m_pDocNameToOffsetMap->Lookup(szURL, &docNameOffsets);
+ pLinesFileBlockHeader->offFile = docNameOffsets.m_dwChksumTableOffset;
+ // pLinesFileBlockHeader->nLines to be filled in when block is complete
+ // pLinesFileBlockHeader->cbBlock to be filled in when block is complete
+
+ pLineCur = (CV_Line_t *)(pLinesFileBlockHeader + 1);
+ pLineBlockStart = pLineCur;
+ CV_Line_t * pLinePrev = NULL;
+
+ for (ULONG32 iINativeMap = 0;iINativeMap < rgILNativeMapAdjustSize; iINativeMap++)
+ {
+ if ((rgIlNativeMap[iINativeMap].ilOffset == NO_MAPPING) ||
+ (rgIlNativeMap[iINativeMap].ilOffset == PROLOG) ||
+ (rgIlNativeMap[iINativeMap].ilOffset == EPILOG))
+ {
+ rgIlNativeMap[iINativeMap].ilOffset = kUnmappedIP;
+ }
+
+ // Sometimes the JIT manager will give us duplicate native offset in the IL-to-native
+ // offset mapping. PDB format frowns on that. Since rgIlNativeMap is being
+ // iterated in native offset order, it's easy to find these dupes right now, and
+ // skip all but the first map containing a given IP offset.
+ if (pLinePrev != NULL &&
+ rgIlNativeMap[iINativeMap].nativeOffset == pLinePrev->offset)
+ {
+ if (pLinePrev->linenumStart == kUnmappedIP)
+ {
+ // if the previous IL offset is kUnmappedIP, then we should rewrite it.
+ pLineCur = pLinePrev;
+ }
+ else if (rgIlNativeMap[iINativeMap].ilOffset != kUnmappedIP &&
+ rgIlNativeMap[iINativeMap].ilOffset < pLinePrev->linenumStart)
+ {
+ pLineCur = pLinePrev;
+ }
+ else
+ {
+ // Found a native offset dupe, ignore the current map entry
+ continue;
+ }
+ }
+
+ pLineCur->linenumStart = rgIlNativeMap[iINativeMap].ilOffset;
+
+ pLineCur->offset = rgIlNativeMap[iINativeMap].nativeOffset;
+ pLineCur->fStatement = 1;
+ pLineCur->deltaLineEnd = 0;
+ pLinePrev = pLineCur;
+ pLineCur++;
+ }
+
+ if (pLineCur == NULL)
+ {
+ // There were no lines data for this function, so don't write anything
+ return S_OK;
+ }
+
+ if (!FinalizeLinesFileBlock(pLinesFileBlockHeader, pLineBlockStart, pLineCur
+#ifdef _DEBUG
+ , true
+#endif
+ ))
+ {
+ return S_OK;
+ }
+
+ // Now that we know pSubSectHeader->cbLen, fill it in
+ pSubSectHeader->cbLen = CV_off32_t(LPBYTE(pLineCur) - LPBYTE(pLinesHeader));
+
+ // Subsection is now filled out, so add it.
+ hr = m_pWriter->ModAddSymbols(
+ m_pMod,
+ rgbLinesSubsection,
+
+ // The size we pass here is the size of the entire byte array that we pass in.
+ long(LPBYTE(pLineCur) - rgbLinesSubsection));
+
+ if (FAILED(hr))
+ return hr;
+
+ return S_OK;
+}
//---------------------------------------------------------------------------------------
//
@@ -4391,7 +4833,11 @@ HRESULT NGenMethodLinesPdbWriter::WriteLinesSubsection(
BOOL NGenMethodLinesPdbWriter::FinalizeLinesFileBlock(
CV_DebugSLinesFileBlockHeader_t * pLinesFileBlockHeader,
CV_Line_t * pLineBlockStart,
- CV_Line_t * pLineBlockAfterEnd)
+ CV_Line_t * pLineBlockAfterEnd
+#ifdef _DEBUG
+ , BOOL ignorekUnmappedIPCheck
+#endif
+ )
{
LIMITED_METHOD_CONTRACT;
@@ -4422,12 +4868,15 @@ BOOL NGenMethodLinesPdbWriter::FinalizeLinesFileBlock(
// at the first file), but the offset will generally be ignored by the PDB
// reader.
#ifdef _DEBUG
+ {
+ if (!ignorekUnmappedIPCheck)
{
for (CV_Line_t * pLineCur = pLineBlockStart; pLineCur < pLineBlockAfterEnd; pLineCur++)
{
_ASSERTE(pLineCur->linenumStart == kUnmappedIP);
}
}
+ }
#endif // _DEBUG
pLinesFileBlockHeader->offFile = 0;
}
diff --git a/src/vm/debugdebugger.h b/src/vm/debugdebugger.h
index 56c3cf13fe..dc71a3d3db 100644
--- a/src/vm/debugdebugger.h
+++ b/src/vm/debugdebugger.h
@@ -151,6 +151,8 @@ private:
void InitPass2();
};
+public:
+
struct GetStackFramesData {
// Used for the integer-skip version
@@ -186,9 +188,6 @@ private:
}
};
-
-public:
-
static FCDECL3(void,
GetStackFramesInternal,
StackFrameHelper* pStackFrameHelper,
@@ -196,6 +195,8 @@ public:
Object* pException
);
+ static void GetStackFramesFromException(OBJECTREF * e, GetStackFramesData *pData, PTRARRAYREF * pDynamicMethodArray = NULL);
+
#ifndef DACCESS_COMPILE
// the DAC directly calls GetStackFramesFromException
private:
@@ -205,8 +206,6 @@ private:
static void GetStackFrames(Frame *pStartFrame, void* pStopStack, GetStackFramesData *pData);
- static void GetStackFramesFromException(OBJECTREF * e, GetStackFramesData *pData, PTRARRAYREF * pDynamicMethodArray = NULL);
-
static StackWalkAction GetStackFramesCallback(CrawlFrame* pCf, VOID* data);
};
diff --git a/src/vm/eepolicy.cpp b/src/vm/eepolicy.cpp
index 5aeb675527..b41b029b3c 100644
--- a/src/vm/eepolicy.cpp
+++ b/src/vm/eepolicy.cpp
@@ -1334,7 +1334,6 @@ void EEPolicy::LogFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage
// (in SystemDomain::Init) finished. See Dev10 Bug 677432 for the detail.
if (ohException != NULL)
{
-#ifdef FEATURE_WINDOWSPHONE
// for fail-fast, if there's a LTO available then use that as the inner exception object
// for the FEEE we'll be reporting. this can help the Watson back-end to generate better
// buckets for apps that call Environment.FailFast() and supply an exception object.
@@ -1345,7 +1344,6 @@ void EEPolicy::LogFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage
EXCEPTIONREF curEx = (EXCEPTIONREF)ObjectFromHandle(ohException);
curEx->SetInnerException(lto);
}
-#endif // FEATURE_WINDOWSPHONE
pThread->SetLastThrownObject(ObjectFromHandle(ohException), TRUE);
}
diff --git a/src/vm/eetoprofinterfaceimpl.cpp b/src/vm/eetoprofinterfaceimpl.cpp
index 09553fa6d1..232fc6d883 100644
--- a/src/vm/eetoprofinterfaceimpl.cpp
+++ b/src/vm/eetoprofinterfaceimpl.cpp
@@ -409,6 +409,7 @@ EEToProfInterfaceImpl::EEToProfInterfaceImpl() :
m_pCallback4(NULL),
m_pCallback5(NULL),
m_pCallback6(NULL),
+ m_pCallback7(NULL),
m_hmodProfilerDLL(NULL),
m_fLoadedViaAttach(FALSE),
m_pProfToEE(NULL),
@@ -659,21 +660,24 @@ HRESULT EEToProfInterfaceImpl::CreateProfiler(
m_hmodProfilerDLL = hmodProfilerDLL.Extract();
hmodProfilerDLL = NULL;
- // The profiler may optionally support ICorProfilerCallback3,4,5,6. Let's check.
-
- ReleaseHolder<ICorProfilerCallback6> pCallback6;
+ // The profiler may optionally support ICorProfilerCallback3,4,5,6,7. Let's check.
+
+ ReleaseHolder<ICorProfilerCallback7> pCallback7;
hr = m_pCallback2->QueryInterface(
- IID_ICorProfilerCallback6,
- (LPVOID *) &pCallback6);
- if (SUCCEEDED(hr) && (pCallback6 != NULL))
+ IID_ICorProfilerCallback7,
+ (LPVOID *)&pCallback7);
+ if (SUCCEEDED(hr) && (pCallback7 != NULL))
{
// Nifty. Transfer ownership to this class
- _ASSERTE(m_pCallback6 == NULL);
- m_pCallback6 = pCallback6.Extract();
- pCallback6 = NULL;
+ _ASSERTE(m_pCallback7 == NULL);
+ m_pCallback7 = pCallback7.Extract();
+ pCallback7 = NULL;
- // And while we're at it, we must now also have an ICorProfilerCallback3,4,5
+ // And while we're at it, we must now also have an ICorProfilerCallback3,4,5,6
// due to inheritance relationship of the interfaces
+ _ASSERTE(m_pCallback6 == NULL);
+ m_pCallback6 = static_cast<ICorProfilerCallback6 *>(m_pCallback7);
+ m_pCallback6->AddRef();
_ASSERTE(m_pCallback5 == NULL);
m_pCallback5 = static_cast<ICorProfilerCallback5 *>(m_pCallback6);
@@ -687,6 +691,36 @@ HRESULT EEToProfInterfaceImpl::CreateProfiler(
m_pCallback3 = static_cast<ICorProfilerCallback3 *>(m_pCallback4);
m_pCallback3->AddRef();
}
+
+ if (m_pCallback6 == NULL)
+ {
+ ReleaseHolder<ICorProfilerCallback6> pCallback6;
+ hr = m_pCallback2->QueryInterface(
+ IID_ICorProfilerCallback6,
+ (LPVOID *)&pCallback6);
+ if (SUCCEEDED(hr) && (pCallback6 != NULL))
+ {
+ // Nifty. Transfer ownership to this class
+ _ASSERTE(m_pCallback6 == NULL);
+ m_pCallback6 = pCallback6.Extract();
+ pCallback6 = NULL;
+
+ // And while we're at it, we must now also have an ICorProfilerCallback3,4,5
+ // due to inheritance relationship of the interfaces
+
+ _ASSERTE(m_pCallback5 == NULL);
+ m_pCallback5 = static_cast<ICorProfilerCallback5 *>(m_pCallback6);
+ m_pCallback5->AddRef();
+
+ _ASSERTE(m_pCallback4 == NULL);
+ m_pCallback4 = static_cast<ICorProfilerCallback4 *>(m_pCallback5);
+ m_pCallback4->AddRef();
+
+ _ASSERTE(m_pCallback3 == NULL);
+ m_pCallback3 = static_cast<ICorProfilerCallback3 *>(m_pCallback4);
+ m_pCallback3->AddRef();
+ }
+ }
if (m_pCallback5 == NULL)
{
@@ -828,6 +862,13 @@ EEToProfInterfaceImpl::~EEToProfInterfaceImpl()
m_pCallback6 = NULL;
}
+ if (m_pCallback7 != NULL)
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ m_pCallback7->Release();
+ m_pCallback7 = NULL;
+ }
+
// Only unload the V4 profiler if this is not part of shutdown. This protects
// Whidbey profilers that aren't used to being FreeLibrary'd.
if (fIsV4Profiler && !g_fEEShutDown)
@@ -2299,6 +2340,12 @@ HRESULT EEToProfInterfaceImpl::SetEventMask(DWORD dwEventMask, DWORD dwEventMask
return CORPROF_E_CALLBACK6_REQUIRED;
}
+ if (((dwEventMaskHigh & COR_PRF_HIGH_IN_MEMORY_SYMBOLS_UPDATED) != 0) &&
+ !IsCallback7Supported())
+ {
+ return CORPROF_E_CALLBACK7_REQUIRED;
+ }
+
// Now save the modified masks
g_profControlBlock.dwEventMask = dwEventMask;
g_profControlBlock.dwEventMaskHigh = dwEventMaskHigh;
@@ -3649,6 +3696,45 @@ HRESULT EEToProfInterfaceImpl::ModuleAttachedToAssembly(
}
}
+HRESULT EEToProfInterfaceImpl::ModuleInMemorySymbolsUpdated(ModuleID moduleId)
+{
+ CONTRACTL
+ {
+ // Yay!
+ NOTHROW;
+
+ // Yay!
+ GC_TRIGGERS;
+
+ // Yay!
+ MODE_PREEMPTIVE;
+
+ // Yay!
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO10,
+ "**PROF: ModuleInMemorySymbolsUpdated. moduleId: 0x%p.\n",
+ moduleId
+ ));
+ HRESULT hr = S_OK;
+
+ _ASSERTE(IsCallback7Supported());
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ hr = m_pCallback7->ModuleInMemorySymbolsUpdated(moduleId);
+ }
+
+ return hr;
+}
+
//---------------------------------------------------------------------------------------
// CLASS EVENTS
//
diff --git a/src/vm/eetoprofinterfaceimpl.h b/src/vm/eetoprofinterfaceimpl.h
index 3de355549d..c87ebfc2ef 100644
--- a/src/vm/eetoprofinterfaceimpl.h
+++ b/src/vm/eetoprofinterfaceimpl.h
@@ -55,6 +55,7 @@ public:
BOOL IsCallback4Supported();
BOOL IsCallback5Supported();
BOOL IsCallback6Supported();
+ BOOL IsCallback7Supported();
HRESULT SetEventMask(DWORD dwEventMask, DWORD dwEventMaskHigh);
@@ -230,6 +231,9 @@ public:
ModuleID moduleId,
AssemblyID AssemblyId);
+ HRESULT ModuleInMemorySymbolsUpdated(
+ ModuleID moduleId);
+
//
// Class Events
//
@@ -525,12 +529,13 @@ private:
// Pointer to the profiler's implementation of the callback interface(s).
// Profilers MUST support ICorProfilerCallback2.
- // Profilers MAY optionally support ICorProfilerCallback3,4,5
+ // Profilers MAY optionally support ICorProfilerCallback3,4,5,6,7
ICorProfilerCallback2 * m_pCallback2;
ICorProfilerCallback3 * m_pCallback3;
ICorProfilerCallback4 * m_pCallback4;
ICorProfilerCallback5 * m_pCallback5;
ICorProfilerCallback6 * m_pCallback6;
+ ICorProfilerCallback7 * m_pCallback7;
HMODULE m_hmodProfilerDLL;
BOOL m_fLoadedViaAttach;
diff --git a/src/vm/eetoprofinterfaceimpl.inl b/src/vm/eetoprofinterfaceimpl.inl
index c2c98f9a63..f836756c7c 100644
--- a/src/vm/eetoprofinterfaceimpl.inl
+++ b/src/vm/eetoprofinterfaceimpl.inl
@@ -60,6 +60,12 @@ inline BOOL EEToProfInterfaceImpl::IsCallback6Supported()
return (m_pCallback6 != NULL);
}
+inline BOOL EEToProfInterfaceImpl::IsCallback7Supported()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pCallback7 != NULL);
+}
+
inline FunctionIDMapper * EEToProfInterfaceImpl::GetFunctionIDMapper()
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/eventreporter.cpp b/src/vm/eventreporter.cpp
index 747a0454ef..1657bc72eb 100644
--- a/src/vm/eventreporter.cpp
+++ b/src/vm/eventreporter.cpp
@@ -17,6 +17,7 @@
#include "utilcode.h"
#include "eventreporter.h"
#include "typestring.h"
+#include "debugdebugger.h"
#include "../dlls/mscorrc/resource.h"
@@ -596,6 +597,73 @@ void LogCallstackForEventReporter(EventReporter& reporter)
LogCallstackForEventReporterWorker(reporter);
}
+void ReportExceptionStackHelper(OBJECTREF exObj, EventReporter& reporter, SmallStackSString& wordAt, int recursionLimit)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ }
+ CONTRACTL_END;
+
+ if (exObj == NULL || recursionLimit == 0)
+ {
+ return;
+ }
+
+ struct
+ {
+ OBJECTREF exObj;
+ EXCEPTIONREF ex;
+ STRINGREF remoteStackTraceString;
+ } gc;
+ ZeroMemory(&gc, sizeof(gc));
+ gc.exObj = exObj;
+ gc.ex = (EXCEPTIONREF)exObj;
+
+ GCPROTECT_BEGIN(gc);
+
+ ReportExceptionStackHelper((gc.ex)->GetInnerException(), reporter, wordAt, recursionLimit - 1);
+
+ StackSString exTypeStr;
+ TypeString::AppendType(exTypeStr, TypeHandle((gc.ex)->GetMethodTable()), TypeString::FormatNamespace | TypeString::FormatFullInst);
+ reporter.AddDescription(exTypeStr);
+
+ gc.remoteStackTraceString = (gc.ex)->GetRemoteStackTraceString();
+ if (gc.remoteStackTraceString != NULL && gc.remoteStackTraceString->GetStringLength())
+ {
+ SString remoteStackTrace;
+ gc.remoteStackTraceString->GetSString(remoteStackTrace);
+
+ // If source info is contained, trim it
+ StripFileInfoFromStackTrace(remoteStackTrace);
+
+ reporter.AddStackTrace(remoteStackTrace);
+ }
+
+ DebugStackTrace::GetStackFramesData stackFramesData;
+ stackFramesData.pDomain = NULL;
+ stackFramesData.skip = 0;
+ stackFramesData.NumFramesRequested = 0;
+
+ DebugStackTrace::GetStackFramesFromException(&(gc.exObj), &stackFramesData);
+
+ for (int j = 0; j < stackFramesData.cElements; j++)
+ {
+ StackSString str;
+ str = wordAt;
+ TypeString::AppendMethodInternal(str, stackFramesData.pElements[j].pFunc, TypeString::FormatNamespace | TypeString::FormatFullInst | TypeString::FormatSignature);
+ reporter.AddStackTrace(str);
+ }
+
+ StackSString separator(L""); // This will result in blank line
+ reporter.AddStackTrace(separator);
+
+ GCPROTECT_END();
+}
+
+
//---------------------------------------------------------------------------------------
//
// Generate an EventLog entry for unhandled exception.
@@ -609,7 +677,7 @@ void LogCallstackForEventReporter(EventReporter& reporter)
void DoReportForUnhandledException(PEXCEPTION_POINTERS pExceptionInfo)
{
WRAPPER_NO_CONTRACT;
-
+
if (ShouldLogInEventLog())
{
Thread *pThread = GetThread();
@@ -623,7 +691,6 @@ void DoReportForUnhandledException(PEXCEPTION_POINTERS pExceptionInfo)
struct
{
OBJECTREF throwable;
- STRINGREF remoteStackTraceString;
STRINGREF originalExceptionMessage;
} gc;
ZeroMemory(&gc, sizeof(gc));
@@ -677,25 +744,28 @@ void DoReportForUnhandledException(PEXCEPTION_POINTERS pExceptionInfo)
else
#endif // FEATURE_CORECLR
{
- // Add the details of the exception object to the event reporter.
- TypeString::AppendType(s, TypeHandle(gc.throwable->GetMethodTable()), TypeString::FormatNamespace|TypeString::FormatFullInst);
- reporter.AddDescription(s);
- reporter.BeginStackTrace();
if (IsException(gc.throwable->GetMethodTable()))
{
- gc.remoteStackTraceString = ((EXCEPTIONREF)gc.throwable)->GetRemoteStackTraceString();
- if (gc.remoteStackTraceString != NULL && gc.remoteStackTraceString->GetStringLength())
+ SmallStackSString wordAt;
+ if (!wordAt.LoadResource(CCompRC::Optional, IDS_ER_WORDAT))
{
- SString remoteStackTrace;
- gc.remoteStackTraceString->GetSString(remoteStackTrace);
-
- // If source info is contained, trim it
- StripFileInfoFromStackTrace(remoteStackTrace);
-
- reporter.AddStackTrace(remoteStackTrace);
+ wordAt.Set(W(" at"));
}
+ else
+ {
+ wordAt.Insert(wordAt.Begin(), W(" "));
+ }
+ wordAt += W(" ");
+
+ ReportExceptionStackHelper(gc.throwable, reporter, wordAt, /* recursionLimit = */10);
+ }
+ else
+ {
+ TypeString::AppendType(s, TypeHandle(gc.throwable->GetMethodTable()), TypeString::FormatNamespace | TypeString::FormatFullInst);
+ reporter.AddDescription(s);
+ reporter.BeginStackTrace();
+ LogCallstackForEventReporterWorker(reporter);
}
- LogCallstackForEventReporterWorker(reporter);
}
GCPROTECT_END();
diff --git a/src/vm/i386/stublinkerx86.cpp b/src/vm/i386/stublinkerx86.cpp
index b86151243c..c3ac8316da 100644
--- a/src/vm/i386/stublinkerx86.cpp
+++ b/src/vm/i386/stublinkerx86.cpp
@@ -6736,8 +6736,16 @@ void FixupPrecode::Fixup(DataImage *image, MethodDesc * pMD)
#endif // HAS_FIXUP_PRECODE
+#endif // !DACCESS_COMPILE
+
+
#ifdef HAS_THISPTR_RETBUF_PRECODE
+// rel32 jmp target that points back to the jump (infinite loop).
+// Used to mark uninitialized ThisPtrRetBufPrecode target
+#define REL32_JMP_SELF (-5)
+
+#ifndef DACCESS_COMPILE
void ThisPtrRetBufPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator)
{
WRAPPER_NO_CONTRACT;
@@ -6762,13 +6770,38 @@ void ThisPtrRetBufPrecode::Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocat
m_jmp = X86_INSTR_JMP_REL32; // jmp rel32
m_pMethodDesc = (TADDR)pMD;
- if (pLoaderAllocator != NULL)
+ // This precode is never patched lazily - avoid unnecessary jump stub allocation
+ m_rel32 = REL32_JMP_SELF;
+}
+
+BOOL ThisPtrRetBufPrecode::SetTargetInterlocked(TADDR target, TADDR expected)
+{
+ CONTRACTL
{
- m_rel32 = rel32UsingJumpStub(&m_rel32,
- GetPreStubEntryPoint(), NULL /* pMD */, pLoaderAllocator);
+ THROWS;
+ GC_TRIGGERS;
}
-}
+ CONTRACTL_END;
-#endif // HAS_THISPTR_RETBUF_PRECODE
+ // This precode is never patched lazily - the interlocked semantics is not required.
+ _ASSERTE(m_rel32 == REL32_JMP_SELF);
+
+ // Use pMD == NULL to allocate the jump stub in non-dynamic heap that has the same lifetime as the precode itself
+ m_rel32 = rel32UsingJumpStub(&m_rel32, target, NULL /* pMD */, ((MethodDesc *)GetMethodDesc())->GetLoaderAllocatorForCode());
+ return TRUE;
+}
#endif // !DACCESS_COMPILE
+
+PCODE ThisPtrRetBufPrecode::GetTarget()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // This precode is never patched lazily - pretend that the uninitialized m_rel32 points to prestub
+ if (m_rel32 == REL32_JMP_SELF)
+ return GetPreStubEntryPoint();
+
+ return rel32Decode(PTR_HOST_MEMBER_TADDR(ThisPtrRetBufPrecode, this, m_rel32));
+}
+
+#endif // HAS_THISPTR_RETBUF_PRECODE
diff --git a/src/vm/i386/stublinkerx86.h b/src/vm/i386/stublinkerx86.h
index 5ed300357c..b7ea73fbc2 100644
--- a/src/vm/i386/stublinkerx86.h
+++ b/src/vm/i386/stublinkerx86.h
@@ -772,25 +772,9 @@ struct ThisPtrRetBufPrecode {
return m_pMethodDesc;
}
- PCODE GetTarget()
- {
- LIMITED_METHOD_DAC_CONTRACT;
-
- return rel32Decode(PTR_HOST_MEMBER_TADDR(ThisPtrRetBufPrecode, this, m_rel32));
- }
-
- BOOL SetTargetInterlocked(TADDR target, TADDR expected)
- {
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- }
- CONTRACTL_END;
+ PCODE GetTarget();
- EnsureWritableExecutablePages(&m_rel32);
- return rel32SetInterlocked(&m_rel32, target, expected, (MethodDesc*)GetMethodDesc());
- }
+ BOOL SetTargetInterlocked(TADDR target, TADDR expected);
};
IN_WIN32(static_assert_no_msg(offsetof(ThisPtrRetBufPrecode, m_movArg1Scratch) + 1 == OFFSETOF_PRECODE_TYPE);)
typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;
diff --git a/src/vm/pefile.cpp b/src/vm/pefile.cpp
index 517092549c..f07cd051c9 100644
--- a/src/vm/pefile.cpp
+++ b/src/vm/pefile.cpp
@@ -1681,6 +1681,17 @@ void PEFile::SetNativeImage(PEImage *image)
m_nativeImage->Load();
m_nativeImage->AllocateLazyCOWPages();
+#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+ static ConfigDWORD configNGenReserveForJumpStubs;
+ int percentReserveForJumpStubs = configNGenReserveForJumpStubs.val(CLRConfig::INTERNAL_NGenReserveForJumpStubs);
+ if (percentReserveForJumpStubs != 0)
+ {
+ PEImageLayout * pLayout = image->GetLoadedLayout();
+ ExecutionManager::GetEEJitManager()->EnsureJumpStubReserve((BYTE *)pLayout->GetBase(), pLayout->GetVirtualSize(),
+ percentReserveForJumpStubs * (pLayout->GetVirtualSize() / 100));
+ }
+#endif
+
ExternalLog(LL_INFO100, W("Attempting to use native image %s."), image->GetPath().GetUnicode());
RETURN;
}
diff --git a/src/vm/proftoeeinterfaceimpl.cpp b/src/vm/proftoeeinterfaceimpl.cpp
index 293d70c596..0b2ebe56f6 100644
--- a/src/vm/proftoeeinterfaceimpl.cpp
+++ b/src/vm/proftoeeinterfaceimpl.cpp
@@ -582,6 +582,10 @@ COM_METHOD ProfToEEInterfaceImpl::QueryInterface(REFIID id, void ** pInterface)
{
*pInterface = static_cast<ICorProfilerInfo6 *>(this);
}
+ else if (id == IID_ICorProfilerInfo7)
+ {
+ *pInterface = static_cast<ICorProfilerInfo7 *>(this);
+ }
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorProfilerInfo *>(this));
@@ -9123,7 +9127,188 @@ HRESULT ProfToEEInterfaceImpl::EnumNgenModuleMethodsInliningThisMethod(
return hr;
}
+HRESULT ProfToEEInterfaceImpl::GetInMemorySymbolsLength(
+ ModuleID moduleId,
+ DWORD* pCountSymbolBytes)
+{
+
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: GetInMemorySymbolsLength.\n"));
+
+ HRESULT hr = S_OK;
+ if (pCountSymbolBytes == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ *pCountSymbolBytes = 0;
+
+ Module* pModule = reinterpret_cast< Module* >(moduleId);
+ if (pModule == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ if (pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //This method would work fine on reflection.emit, but there would be no way to know
+ //if some other thread was changing the size of the symbols before this method returned.
+ //Adding events or locks to detect/prevent changes would make the scenario workable
+ if (pModule->IsReflection())
+ {
+ return COR_PRF_MODULE_DYNAMIC;
+ }
+
+ CGrowableStream* pStream = pModule->GetInMemorySymbolStream();
+ if (pStream == NULL)
+ {
+ return S_OK;
+ }
+
+ STATSTG SizeData = { 0 };
+ hr = pStream->Stat(&SizeData, STATFLAG_NONAME);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ if (SizeData.cbSize.u.HighPart > 0)
+ {
+ return COR_E_OVERFLOW;
+ }
+ *pCountSymbolBytes = SizeData.cbSize.u.LowPart;
+
+ return S_OK;
+}
+
+HRESULT ProfToEEInterfaceImpl::ReadInMemorySymbols(
+ ModuleID moduleId,
+ DWORD symbolsReadOffset,
+ BYTE* pSymbolBytes,
+ DWORD countSymbolBytes,
+ DWORD* pCountSymbolBytesRead)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO10,
+ "**PROF: ReadInMemorySymbols.\n"));
+
+ HRESULT hr = S_OK;
+ if (pSymbolBytes == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ if (pCountSymbolBytesRead == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ *pCountSymbolBytesRead = 0;
+
+ Module* pModule = reinterpret_cast< Module* >(moduleId);
+ if (pModule == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ if (pModule->IsBeingUnloaded())
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+
+ //This method would work fine on reflection.emit, but there would be no way to know
+ //if some other thread was changing the size of the symbols before this method returned.
+ //Adding events or locks to detect/prevent changes would make the scenario workable
+ if (pModule->IsReflection())
+ {
+ return COR_PRF_MODULE_DYNAMIC;
+ }
+
+ CGrowableStream* pStream = pModule->GetInMemorySymbolStream();
+ if (pStream == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ STATSTG SizeData = { 0 };
+ hr = pStream->Stat(&SizeData, STATFLAG_NONAME);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ if (SizeData.cbSize.u.HighPart > 0)
+ {
+ return COR_E_OVERFLOW;
+ }
+ DWORD streamSize = SizeData.cbSize.u.LowPart;
+ if (symbolsReadOffset >= streamSize)
+ {
+ return E_INVALIDARG;
+ }
+
+ *pCountSymbolBytesRead = min(streamSize - symbolsReadOffset, countSymbolBytes);
+ memcpy_s(pSymbolBytes, countSymbolBytes, ((BYTE*)pStream->GetRawBuffer().StartAddress()) + symbolsReadOffset, *pCountSymbolBytesRead);
+
+ return S_OK;
+}
+
+HRESULT ProfToEEInterfaceImpl::ApplyMetaData(
+ ModuleID moduleId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach, (LF_CORPROF, LL_INFO1000, "**PROF: ApplyMetaData.\n"));
+
+ if (moduleId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+ HRESULT hr = S_OK;
+ EX_TRY
+ {
+ Module *pModule = (Module *)moduleId;
+ _ASSERTE(pModule != NULL);
+ if (pModule->IsBeingUnloaded())
+ {
+ hr = CORPROF_E_DATAINCOMPLETE;
+ }
+ else
+ {
+ pModule->ApplyMetaData();
+ }
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+}
//---------------------------------------------------------------------------------------
//
diff --git a/src/vm/proftoeeinterfaceimpl.h b/src/vm/proftoeeinterfaceimpl.h
index cae25d9dae..d256788240 100644
--- a/src/vm/proftoeeinterfaceimpl.h
+++ b/src/vm/proftoeeinterfaceimpl.h
@@ -134,7 +134,7 @@ typedef struct _PROFILER_STACK_WALK_DATA PROFILER_STACK_WALK_DATA;
// from the profiler implementation. The profiler will call back on the v-table
// to get at EE internals as required.
-class ProfToEEInterfaceImpl : public ICorProfilerInfo6
+class ProfToEEInterfaceImpl : public ICorProfilerInfo7
{
public:
@@ -524,6 +524,10 @@ public:
COM_METHOD GetEventMask2(DWORD *pdwEventsLow, DWORD *pdwEventsHigh);
+ // end ICorProfilerInfo5
+
+ // begin ICorProfilerInfo6
+
COM_METHOD EnumNgenModuleMethodsInliningThisMethod(
ModuleID inlinersModuleId,
ModuleID inlineeModuleId,
@@ -532,7 +536,25 @@ public:
ICorProfilerMethodEnum** ppEnum);
- // end ICorProfilerInfo5
+ // end ICorProfilerInfo6
+
+ // begin ICorProfilerInfo7
+
+ COM_METHOD ApplyMetaData(
+ ModuleID moduleId);
+
+ COM_METHOD GetInMemorySymbolsLength(
+ ModuleID moduleId,
+ DWORD* pCountSymbolBytes);
+
+ COM_METHOD ReadInMemorySymbols(
+ ModuleID moduleId,
+ DWORD symbolsReadOffset,
+ BYTE* pSymbolBytes,
+ DWORD countSymbolBytes,
+ DWORD* pCountSymbolBytesRead);
+
+ // end ICorProfilerInfo7
protected:
diff --git a/src/vm/threaddebugblockinginfo.cpp b/src/vm/threaddebugblockinginfo.cpp
index 6ff0bab3e7..a77c69b457 100644
--- a/src/vm/threaddebugblockinginfo.cpp
+++ b/src/vm/threaddebugblockinginfo.cpp
@@ -82,17 +82,10 @@ m_pThread(pThread)
#endif //DACCESS_COMPILE
// Holder destructor pops a blocking item off the blocking info stack
-// NOTE: optimizations are disabled to work around a codegen bug on x86
#ifndef DACCESS_COMPILE
-#ifdef _TARGET_X86_
-#pragma optimize( "", off )
-#endif // _TARGET_X86_
DebugBlockingItemHolder::~DebugBlockingItemHolder()
{
LIMITED_METHOD_CONTRACT;
m_pThread->DebugBlockingInfo.PopBlockingItem();
}
-#ifdef _TARGET_X86_
-#pragma optimize( "", on )
-#endif // _TARGET_X86_
#endif //DACCESS_COMPILE