Diffstat (limited to 'src/vm')
-rw-r--r--  src/vm/CMakeLists.txt | 13
-rw-r--r--  src/vm/arm/asmhelpers.S | 18
-rw-r--r--  src/vm/arm/asmhelpers.asm | 21
-rw-r--r--  src/vm/arm/cgencpu.h | 55
-rw-r--r--  src/vm/arm/stubs.cpp | 7
-rw-r--r--  src/vm/arm64/asmhelpers.S | 33
-rw-r--r--  src/vm/arm64/crthelpers.S | 420
-rw-r--r--  src/vm/arm64/stubs.cpp | 13
-rw-r--r--  src/vm/assemblyname.cpp | 19
-rw-r--r--  src/vm/assemblynative.cpp | 140
-rw-r--r--  src/vm/assemblynative.hpp | 8
-rw-r--r--  src/vm/assemblyspec.cpp | 14
-rw-r--r--  src/vm/ceeload.cpp | 10
-rw-r--r--  src/vm/ceemain.cpp | 18
-rw-r--r--  src/vm/class.h | 11
-rw-r--r--  src/vm/clrex.cpp | 13
-rw-r--r--  src/vm/codeman.cpp | 79
-rw-r--r--  src/vm/codeman.h | 6
-rw-r--r--  src/vm/comdependenthandle.cpp | 30
-rw-r--r--  src/vm/comdependenthandle.h | 12
-rw-r--r--  src/vm/compile.cpp | 24
-rw-r--r--  src/vm/coreassemblyspec.cpp | 28
-rw-r--r--  src/vm/corhost.cpp | 2
-rw-r--r--  src/vm/coverage.cpp | 55
-rw-r--r--  src/vm/coverage.h | 19
-rw-r--r--  src/vm/dllimport.cpp | 14
-rw-r--r--  src/vm/dwreport.cpp | 42
-rw-r--r--  src/vm/ecalllist.h | 26
-rw-r--r--  src/vm/eeconfig.cpp | 31
-rw-r--r--  src/vm/eeconfig.h | 15
-rw-r--r--  src/vm/eepolicy.cpp | 98
-rw-r--r--  src/vm/eetoprofinterfaceimpl.cpp | 93
-rw-r--r--  src/vm/eetoprofinterfaceimpl.h | 9
-rw-r--r--  src/vm/eventpipe.cpp | 448
-rw-r--r--  src/vm/eventpipe.h | 200
-rw-r--r--  src/vm/eventpipebuffer.cpp | 281
-rw-r--r--  src/vm/eventpipebuffer.h | 109
-rw-r--r--  src/vm/eventpipebuffermanager.cpp | 808
-rw-r--r--  src/vm/eventpipebuffermanager.h | 161
-rw-r--r--  src/vm/eventpipeconfiguration.cpp | 594
-rw-r--r--  src/vm/eventpipeconfiguration.h | 164
-rw-r--r--  src/vm/eventpipeevent.cpp | 120
-rw-r--r--  src/vm/eventpipeevent.h | 87
-rw-r--r--  src/vm/eventpipeeventinstance.cpp | 238
-rw-r--r--  src/vm/eventpipeeventinstance.h | 90
-rw-r--r--  src/vm/eventpipefile.cpp | 164
-rw-r--r--  src/vm/eventpipefile.h | 85
-rw-r--r--  src/vm/eventpipejsonfile.cpp | 22
-rw-r--r--  src/vm/eventpipejsonfile.h | 12
-rw-r--r--  src/vm/eventpipeprovider.cpp | 244
-rw-r--r--  src/vm/eventpipeprovider.h | 117
-rw-r--r--  src/vm/eventtrace.cpp | 22
-rw-r--r--  src/vm/exceptionhandling.cpp | 6
-rw-r--r--  src/vm/fastserializableobject.h | 32
-rw-r--r--  src/vm/fastserializer.cpp | 337
-rw-r--r--  src/vm/fastserializer.h | 74
-rw-r--r--  src/vm/field.cpp | 9
-rw-r--r--  src/vm/field.h | 6
-rw-r--r--  src/vm/i386/cgenx86.cpp | 24
-rw-r--r--  src/vm/i386/excepcpu.h | 4
-rw-r--r--  src/vm/i386/excepx86.cpp | 54
-rw-r--r--  src/vm/i386/gmsx86.cpp | 2
-rw-r--r--  src/vm/i386/unixstubs.cpp | 6
-rw-r--r--  src/vm/jitinterface.cpp | 14
-rw-r--r--  src/vm/method.cpp | 221
-rw-r--r--  src/vm/method.hpp | 17
-rw-r--r--  src/vm/methodtable.h | 2
-rw-r--r--  src/vm/mscorlib.cpp | 3
-rw-r--r--  src/vm/mscorlib.h | 6
-rw-r--r--  src/vm/pefile.cpp | 14
-rw-r--r--  src/vm/pefile.h | 6
-rw-r--r--  src/vm/peimage.cpp | 6
-rw-r--r--  src/vm/peimagelayout.cpp | 13
-rw-r--r--  src/vm/precode.cpp | 10
-rw-r--r--  src/vm/precode.h | 5
-rw-r--r--  src/vm/prestub.cpp | 22
-rw-r--r--  src/vm/runtimehandles.cpp | 4
-rw-r--r--  src/vm/sampleprofiler.cpp | 79
-rw-r--r--  src/vm/sampleprofiler.h | 35
-rw-r--r--  src/vm/threads.cpp | 29
-rw-r--r--  src/vm/threads.h | 59
-rw-r--r--  src/vm/threadsuspend.cpp | 5
-rw-r--r--  src/vm/tieredcompilation.cpp | 4
-rw-r--r--  src/vm/typedesc.cpp | 20
-rw-r--r--  src/vm/win32threadpool.cpp | 25
85 files changed, 5616 insertions(+), 899 deletions(-)
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index da1aa8fe62..c610d3c7a8 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -155,7 +155,6 @@ set(VM_SOURCES_WKS
comthreadpool.cpp
comutilnative.cpp
comwaithandle.cpp
- coverage.cpp
customattribute.cpp
custommarshalerinfo.cpp
dllimportcallback.cpp
@@ -165,8 +164,16 @@ set(VM_SOURCES_WKS
eepolicy.cpp
eetoprofinterfaceimpl.cpp
eventpipe.cpp
+ eventpipeconfiguration.cpp
+ eventpipeevent.cpp
+ eventpipeeventinstance.cpp
+ eventpipefile.cpp
eventpipejsonfile.cpp
+ eventpipeprovider.cpp
+ eventpipebuffer.cpp
+ eventpipebuffermanager.cpp
eventstore.cpp
+ fastserializer.cpp
fcall.cpp
fieldmarshaler.cpp
finalizerthread.cpp
@@ -482,3 +489,7 @@ convert_to_absolute_path(VM_SOURCES_DAC ${VM_SOURCES_DAC})
add_subdirectory(dac)
add_subdirectory(wks)
+
+if(CLR_CMAKE_PLATFORM_LINUX)
+ add_subdirectory($ENV{__IntermediatesDir}/Generated/eventpipe ${CMAKE_CURRENT_BINARY_DIR}/eventpipe)
+endif(CLR_CMAKE_PLATFORM_LINUX)
diff --git a/src/vm/arm/asmhelpers.S b/src/vm/arm/asmhelpers.S
index 04d7527180..36933f5ea6 100644
--- a/src/vm/arm/asmhelpers.S
+++ b/src/vm/arm/asmhelpers.S
@@ -509,6 +509,24 @@ LOCAL_LABEL(UM2MThunk_WrapperHelper_ArgumentsSetup):
NESTED_END ThePreStub, _TEXT
// ------------------------------------------------------------------
+ NESTED_ENTRY ThePreStubCompactARM, _TEXT, NoHandler
+
+ // r12 - address of compact entry point + PC_REG_RELATIVE_OFFSET
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ mov r0, r12
+
+ bl C_FUNC(PreStubGetMethodDescForCompactEntryPoint)
+
+ mov r12, r0 // pMethodDesc
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ b C_FUNC(ThePreStub)
+
+ NESTED_END ThePreStubCompactARM, _TEXT
+// ------------------------------------------------------------------
// This method does nothing. It's just a fixed function for the debugger to put a breakpoint on.
LEAF_ENTRY ThePreStubPatch, _TEXT
nop
diff --git a/src/vm/arm/asmhelpers.asm b/src/vm/arm/asmhelpers.asm
index 542bdc65cc..e5fd41a513 100644
--- a/src/vm/arm/asmhelpers.asm
+++ b/src/vm/arm/asmhelpers.asm
@@ -24,6 +24,7 @@
IMPORT UMThunkStubRareDisableWorker
IMPORT UM2MDoADCallBack
IMPORT PreStubWorker
+ IMPORT PreStubGetMethodDescForCompactEntryPoint
IMPORT NDirectImportWorker
IMPORT ObjIsInstanceOfNoGC
IMPORT ArrayStoreCheck
@@ -571,6 +572,26 @@ UM2MThunk_WrapperHelper_ArgumentsSetup
NESTED_END
; ------------------------------------------------------------------
+
+ NESTED_ENTRY ThePreStubCompactARM
+
+ ; r12 - address of compact entry point + PC_REG_RELATIVE_OFFSET
+
+ PROLOG_WITH_TRANSITION_BLOCK
+
+ mov r0, r12
+
+ bl PreStubGetMethodDescForCompactEntryPoint
+
+ mov r12, r0 ; pMethodDesc
+
+ EPILOG_WITH_TRANSITION_BLOCK_TAILCALL
+
+ b ThePreStub
+
+ NESTED_END
+
+; ------------------------------------------------------------------
; This method does nothing. It's just a fixed function for the debugger to put a breakpoint on.
LEAF_ENTRY ThePreStubPatch
nop
diff --git a/src/vm/arm/cgencpu.h b/src/vm/arm/cgencpu.h
index 34af8187b2..181d5f10eb 100644
--- a/src/vm/arm/cgencpu.h
+++ b/src/vm/arm/cgencpu.h
@@ -57,7 +57,7 @@ EXTERN_C void checkStack(void);
#define JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a jump instruction
#define BACK_TO_BACK_JUMP_ALLOCATE_SIZE 8 // # bytes to allocate for a back to back jump instruction
-//#define HAS_COMPACT_ENTRYPOINTS 1
+#define HAS_COMPACT_ENTRYPOINTS 1
#define HAS_NDIRECT_IMPORT_PRECODE 1
@@ -90,6 +90,12 @@ EXTERN_C void setFPReturn(int fpSize, INT64 retVal);
// this is the offset by which it should be decremented to arrive at the callsite.
#define STACKWALK_CONTROLPC_ADJUST_OFFSET 2
+// Max offset for unconditional thumb branch
+#define MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB 2048
+
+// Offset of pc register
+#define PC_REG_RELATIVE_OFFSET 4
+
//=======================================================================
// IMPORTANT: This value is used to figure out how much to allocate
// for a fixed array of FieldMarshaler's. That means it must be at least
@@ -236,6 +242,53 @@ void emitCOMStubCall (ComCallMethodDesc *pCOMMethod, PCODE target);
#endif // FEATURE_COMINTEROP
//------------------------------------------------------------------------
+inline void emitUnconditionalBranchThumb(LPBYTE pBuffer, int16_t offset)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ uint16_t *pInstr = (uint16_t *) pBuffer;
+
+ // offset from -2KB to +2KB
+ _ASSERTE (offset >= - MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB && offset < MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB);
+
+ if (offset >= 0)
+ {
+ offset = offset >> 1;
+ }
+ else
+ {
+ offset = ((MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB + offset) >> 1) | 0x400;
+ }
+
+ *pInstr = 0xE000 | offset;
+}
+
+//------------------------------------------------------------------------
+inline int16_t decodeUnconditionalBranchThumb(LPBYTE pBuffer)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ uint16_t *pInstr = (uint16_t *) pBuffer;
+
+ int16_t offset = (~0xE000) & (*pInstr);
+
+ if ((offset & 0x400) == 0)
+ {
+ offset = offset << 1;
+ }
+ else
+ {
+ offset = (~0x400) & offset;
+ offset = (offset << 1) - MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB;
+ }
+
+ // offset from -2KB to +2KB
+ _ASSERTE (offset >= - MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB && offset < MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB);
+
+ return offset;
+}
+
+//------------------------------------------------------------------------
inline void emitJump(LPBYTE pBuffer, LPVOID target)
{
LIMITED_METHOD_CONTRACT;
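[Editor's note] The two new helpers above pack a signed byte offset into the 16-bit Thumb T1 unconditional branch (B) encoding: 0xE000 | imm11, where imm11 is the offset in halfwords. As a sanity check, here is a minimal standalone sketch of the same round trip; the names are illustrative, not from this tree, and the arithmetic right shift of a negative value is assumed (as on all mainstream compilers).

    #include <cassert>
    #include <cstdint>

    static uint16_t EncodeThumbB(int16_t byteOffset)   // -2048 <= byteOffset < 2048
    {
        assert(byteOffset >= -2048 && byteOffset < 2048);
        uint16_t imm11 = (uint16_t)((byteOffset >> 1) & 0x7FF); // arithmetic shift keeps the sign
        return (uint16_t)(0xE000 | imm11);
    }

    static int16_t DecodeThumbB(uint16_t instr)
    {
        int16_t imm11 = (int16_t)(instr & 0x7FF);
        if (imm11 & 0x400)            // sign bit of the 11-bit field
            imm11 -= 0x800;           // sign-extend
        return (int16_t)(imm11 << 1); // back to a byte offset
    }

    int main()
    {
        for (int16_t off = -2048; off < 2048; off += 2)
            assert(DecodeThumbB(EncodeThumbB(off)) == off);
        return 0;
    }

This matches the emit/decode pair in the hunk: a non-negative offset becomes offset >> 1, and a negative one becomes ((2048 + offset) >> 1) | 0x400, which is the same 11-bit two's-complement value.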
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
index f1ba278ada..3088761f0b 100644
--- a/src/vm/arm/stubs.cpp
+++ b/src/vm/arm/stubs.cpp
@@ -1333,6 +1333,13 @@ BOOL DoesSlotCallPrestub(PCODE pCode)
{
PTR_WORD pInstr = dac_cast<PTR_WORD>(PCODEToPINSTR(pCode));
+#ifdef HAS_COMPACT_ENTRYPOINTS
+ if (MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, TRUE) != NULL)
+ {
+ return TRUE;
+ }
+#endif // HAS_COMPACT_ENTRYPOINTS
+
// FixupPrecode
if (pInstr[0] == 0x46fc && // // mov r12, pc
pInstr[1] == 0xf8df &&
diff --git a/src/vm/arm64/asmhelpers.S b/src/vm/arm64/asmhelpers.S
index 15b8057ed6..79e398937a 100644
--- a/src/vm/arm64/asmhelpers.S
+++ b/src/vm/arm64/asmhelpers.S
@@ -217,6 +217,7 @@ LEAF_END ThePreStubPatch, _TEXT
// x13 : incremented by 8
// x14 : incremented by 8
// x15 : trashed
+// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
//
WRITE_BARRIER_ENTRY JIT_ByRefWriteBarrier
@@ -236,6 +237,7 @@ WRITE_BARRIER_END JIT_ByRefWriteBarrier
// x12 : trashed
// x14 : incremented by 8
// x15 : trashed
+// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
//
WRITE_BARRIER_ENTRY JIT_CheckedWriteBarrier
PREPARE_EXTERNAL_VAR g_lowest_address, x12
@@ -262,6 +264,7 @@ WRITE_BARRIER_END JIT_CheckedWriteBarrier
// x12 : trashed
// x14 : incremented by 8
// x15 : trashed
+// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
//
WRITE_BARRIER_ENTRY JIT_WriteBarrier
dmb ST
@@ -310,6 +313,21 @@ LOCAL_LABEL(shadowupdateend):
ldp x12, x13, [sp],#16
#endif
+#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+ // Update the write watch table if necessary
+ PREPARE_EXTERNAL_VAR g_sw_ww_enabled_for_gc_heap, x12
+ ldrb w12, [x12]
+ cbz x12, LOCAL_LABEL(CheckCardTable)
+ PREPARE_EXTERNAL_VAR g_sw_ww_table, x12
+ ldr x12, [x12]
+ add x12, x12, x14, lsr #0xc // SoftwareWriteWatch::AddressToTableByteIndexShift
+ ldrb w17, [x12]
+ cbnz x17, LOCAL_LABEL(CheckCardTable)
+ mov w17, #0xFF
+ strb w17, [x12]
+#endif
+
+LOCAL_LABEL(CheckCardTable):
// Branch to Exit if the reference is not in the Gen0 heap
//
PREPARE_EXTERNAL_VAR g_ephemeral_low, x12
@@ -333,6 +351,21 @@ LOCAL_LABEL(shadowupdateend):
LOCAL_LABEL(UpdateCardTable):
mov x12, 0xFF
strb w12, [x15]
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ // Check if we need to update the card table
+ PREPARE_EXTERNAL_VAR g_card_bundle_table, x12
+ ldr x12, [x12]
+ add x15, x12, x14, lsr #21
+ ldrb w12, [x15]
+ cmp x12, 0xFF
+ beq LOCAL_LABEL(Exit)
+
+LOCAL_LABEL(UpdateCardBundle):
+ mov x12, 0xFF
+ strb w12, [x15]
+#endif
+
LOCAL_LABEL(Exit):
add x14, x14, 8
ret lr
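[Editor's note] In C terms, the FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP block added above marks one byte per 4KB page (the lsr #0xc matches SoftwareWriteWatch::AddressToTableByteIndexShift), storing only when the entry is still clear so repeat writes stay cheap; the card-bundle hunk (lsr #21) has the same check-then-set shape. The sketch below is a hedged reading of the assembly with toy stand-ins for the externals, not code from the tree, and it merges the two updates that the barrier actually performs at different points.

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t g_sw_ww_enabled_for_gc_heap = 1;   // stand-ins for the externals
    static uint8_t g_sw_ww_table[1 << 20];            // one byte per 4KB page (4GB toy space)
    static uint8_t g_card_bundle_table[1 << 11];      // one byte per 2MB

    static void RecordWrite(uintptr_t dst)
    {
        if (g_sw_ww_enabled_for_gc_heap)
        {
            uint8_t *entry = &g_sw_ww_table[dst >> 12];     // AddressToTableByteIndexShift
            if (*entry == 0)                                // only dirty the line once
                *entry = 0xFF;
        }
        uint8_t *bundle = &g_card_bundle_table[dst >> 21];  // card-bundle granule
        if (*bundle != 0xFF)
            *bundle = 0xFF;
    }

    int main(void)
    {
        RecordWrite(0x12345678);
        printf("page byte: %02X, bundle byte: %02X\n",
               g_sw_ww_table[0x12345678u >> 12],
               g_card_bundle_table[0x12345678u >> 21]);
        return 0;
    }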
diff --git a/src/vm/arm64/crthelpers.S b/src/vm/arm64/crthelpers.S
index 36eb4ee7a3..c8b108ca8f 100644
--- a/src/vm/arm64/crthelpers.S
+++ b/src/vm/arm64/crthelpers.S
@@ -18,62 +18,109 @@
//
//void JIT_MemSet(void *dst, int val, SIZE_T count)
//
-// uintptr_t valEx = (unsigned char)val;
+// uint64_t valEx = (unsigned char)val;
// valEx = valEx | valEx << 8;
// valEx = valEx | valEx << 16;
// valEx = valEx | valEx << 32;
//
+// size_t dc_zva_size = 4ULL << DCZID_EL0.BS;
+//
+// uint64_t use_dc_zva = (val == 0) && !DCZID_EL0.p ? count / (2 * dc_zva_size) : 0; // ~Minimum size (assumes worst case alignment)
+//
// // If not aligned then make it 8-byte aligned
-// if(((uintptr_t)dst&0x7) != 0)
+// if(((uint64_t)dst&0xf) != 0)
// {
-// if(((uintptr_t)dst&0x3) == 0)
+// // Calculate alignment we can do without exceeding count
+// // Use math to avoid introducing more unpredictable branches
+// // Due to inherent mod in lsr, ~7 is used instead of ~0 to handle count == 0
+// Note logic will fail if count >= (1 << 61). But this exceeds max physical memory for arm64
+// uint8_t align = (dst & 0x7) & (~uint64_t(7) >> (countLeadingZeros(count) mod 64))
+//
+// if(align&0x1)
// {
-// *(UINT*)dst = (UINT)valEx;
-// dst = (UINT*)dst + 1;
+// *(uint8_t*)dst = (uint8_t)valEx;
+// dst = (uint8_t*)dst + 1;
+// count-=1;
+// }
+//
+// if(align&0x2)
+// {
+// *(uint16_t*)dst = (uint16_t)valEx;
+// dst = (uint16_t*)dst + 1;
+// count-=2;
+// }
+//
+// if(align&0x4)
+// {
+// *(unit32_t*)dst = (unit32_t)valEx;
+// dst = (unit32_t*)dst + 1;
// count-=4;
// }
-// else if(((uintptr_t)dst&0x1) == 0)
+// }
+//
+// if(use_dc_zva)
+// {
+// // If not aligned then make it aligned to dc_zva_size
+// if(dst&0x8)
+// {
+// *(uint64_t*)dst = (uint64_t)valEx;
+// dst = (uint64_t*)dst + 1;
+// count-=8;
+// }
+//
+// while(dst & (dc_zva_size - 1))
// {
-// while(count > 0 && ((uintptr_t)dst&0x7) != 0)
-// {
-// *(short*)dst = (short)valEx;
-// dst = (short*)dst + 1;
-// count-=2;
-// }
+// *(uint64_t*)dst = valEx;
+// dst = (uint64_t*)dst + 1;
+// *(uint64_t*)dst = valEx;
+// dst = (uint64_t*)dst + 1;
+// count-=16;
// }
-// else
+//
+// count -= dc_zva_size;
+//
+// while(count >= 0)
// {
-// while(count > 0 && ((uintptr_t)dst&0x7) != 0)
-// {
-// *(char*)dst = (char)valEx;
-// dst = (char*)dst + 1;
-// count--;
-// }
+// dc_zva(dst);
+// dst = (uint8_t*)dst + dc_zva_size;
+// count-=dc_zva_size;
// }
+//
+// count += dc_zva_size;
// }
//
-// while(count >= 8)
+// count-=16;
+//
+// while(count >= 0)
// {
-// *(uintptr_t*)dst = valEx;
-// dst = (uintptr_t*)dst + 1;
-// count-=8;
+// *(uint64_t*)dst = valEx;
+// dst = (uint64_t*)dst + 1;
+// *(uint64_t*)dst = valEx;
+// dst = (uint64_t*)dst + 1;
+// count-=16;
+// }
+//
+// if(count & 8)
+// {
+// *(uint64_t*)dst = valEx;
+// dst = (uint64_t*)dst + 1;
// }
//
// if(count & 4)
// {
-// *(UINT*)dst = (UINT)valEx;
-// dst = (UINT*)dst + 1;
+// *(uint32_t*)dst = (uint32_t)valEx;
+// dst = (uint32_t*)dst + 1;
// }
//
// if(count & 2)
// {
-// *(short*)dst = (short)valEx;
-// dst = (short*)dst + 1;
+// *(uint16_t*)dst = (uint16_t)valEx;
+// dst = (uint16_t*)dst + 1;
// }
//
// if(count & 1)
// {
-// *(char*)dst = (char)valEx;
+// *(uint8_t*)dst = (uint8_t)valEx;
// }
//
//
@@ -85,68 +132,89 @@
// as C++ method.
LEAF_ENTRY JIT_MemSet, _TEXT
- uxtb w8,w1
- sxtw x8,w8
- orr x8,x8,x8, lsl #8
- orr x8,x8,x8, lsl #0x10
- orr x9,x8,x8, lsl #0x20
- and x8,x0,#7
- cbz x8,LOCAL_LABEL(JIT_MemSet_0x7c)
- and x8,x0,#3
- cbnz x8,LOCAL_LABEL(JIT_MemSet_0x38)
- str w9,[x0]
- add x0,x0,#4
- mov x8,#-4
- add x2,x2,x8
- b LOCAL_LABEL(JIT_MemSet_0x7c)
-LOCAL_LABEL(JIT_MemSet_0x38):
- cbz x2,LOCAL_LABEL(JIT_MemSet_0x7c)
- tbnz x0,#0,LOCAL_LABEL(JIT_MemSet_0x60)
-LOCAL_LABEL(JIT_MemSet_0x40):
- and x8,x0,#7
- cbz x8,LOCAL_LABEL(JIT_MemSet_0x7c)
- strh w9,[x0]
- add x0,x0,#2
- mov x8,#-2
- add x2,x2,x8
- cbnz x2,LOCAL_LABEL(JIT_MemSet_0x40)
- b LOCAL_LABEL(JIT_MemSet_0x7c)
-LOCAL_LABEL(JIT_MemSet_0x60):
- and x8,x0,#7
- cbz x8,LOCAL_LABEL(JIT_MemSet_0x7c)
- strb w9,[x0]
- add x0,x0,#1
- mov x8,#-1
- add x2,x2,x8
- cbnz x2,LOCAL_LABEL(JIT_MemSet_0x60)
-LOCAL_LABEL(JIT_MemSet_0x7c):
- cmp x2,#8
- blo LOCAL_LABEL(JIT_MemSet_0xb8)
- lsr x8,x2,#3
- mov x11,x8
- mov x10,x0
- add x8,x10,x11, lsl #3
+ ands w8, w1, #0xff
+ mrs x3, DCZID_EL0 // x3 = DCZID_EL0
+ mov x6, #4
+ lsr x11, x2, #3 // x11 = count >> 3
+
+ orr w8, w8, w8, lsl #8
+ and x5, x3, #0xf // x5 = dczid_el0.bs
+ csel x11, x11, xzr, eq // x11 = (val == 0) ? count >> 3 : 0
+ tst x3, (1 << 4)
+
+ orr w8, w8, w8, lsl #0x10
+ csel x11, x11, xzr, eq // x11 = (val == 0) && !DCZID_EL0.p ? count >> 3 : 0
+ ands x3, x0, #7 // x3 = dst & 7
+ lsl x9, x6, x5 // x9 = size
+
+ orr x8, x8, x8, lsl #0x20
+ lsr x11, x11, x5 // x11 = (val == 0) && !DCZID_EL0.p ? count >> (3 + DCZID_EL0.bs) : 0
+ sub x10, x9, #1 // x10 = mask
+
+ b.eq LOCAL_LABEL(JIT_MemSet_0x80)
+
+ movn x4, #7
+ clz x5, x2
+ lsr x4, x4, x5
+ and x3, x3, x4
+
+ tbz x3, #0, LOCAL_LABEL(JIT_MemSet_0x2c)
+ strb w8, [x0], #1
+ sub x2, x2, #1
+LOCAL_LABEL(JIT_MemSet_0x2c):
+ tbz x3, #1, LOCAL_LABEL(JIT_MemSet_0x5c)
+ strh w8, [x0], #2
+ sub x2, x2, #2
+LOCAL_LABEL(JIT_MemSet_0x5c):
+ tbz x3, #2, LOCAL_LABEL(JIT_MemSet_0x80)
+ str w8, [x0], #4
+ sub x2, x2, #4
+LOCAL_LABEL(JIT_MemSet_0x80):
+ cbz x11, LOCAL_LABEL(JIT_MemSet_0x9c)
+ tbz x0, #3, LOCAL_LABEL(JIT_MemSet_0x84)
+ str x8, [x0], #8
+ sub x2, x2, #8
+
+ b LOCAL_LABEL(JIT_MemSet_0x85)
+LOCAL_LABEL(JIT_MemSet_0x84):
+ stp x8, x8, [x0], #16
+ sub x2, x2, #16
+LOCAL_LABEL(JIT_MemSet_0x85):
+ tst x0, x10
+ b.ne LOCAL_LABEL(JIT_MemSet_0x84)
+
+ b LOCAL_LABEL(JIT_MemSet_0x8a)
+LOCAL_LABEL(JIT_MemSet_0x88):
+ dc zva, x0
+ add x0, x0, x9
+LOCAL_LABEL(JIT_MemSet_0x8a):
+ subs x2, x2, x9
+ b.ge LOCAL_LABEL(JIT_MemSet_0x88)
+
+LOCAL_LABEL(JIT_MemSet_0x8c):
+ add x2, x2, x9
+
LOCAL_LABEL(JIT_MemSet_0x9c):
- cmp x10,x8
- beq LOCAL_LABEL(JIT_MemSet_0xac)
- str x9,[x10],#8
- b LOCAL_LABEL(JIT_MemSet_0x9c)
-LOCAL_LABEL(JIT_MemSet_0xac):
- mov x8,#-8
- madd x2,x11,x8,x2
- add x0,x0,x11, lsl #3
-LOCAL_LABEL(JIT_MemSet_0xb8):
- tbz x2,#2,LOCAL_LABEL(JIT_MemSet_0xc4)
- str w9,[x0]
- add x0,x0,#4
-LOCAL_LABEL(JIT_MemSet_0xc4):
- tbz x2,#1,LOCAL_LABEL(JIT_MemSet_0xd0)
- strh w9,[x0]
- add x0,x0,#2
-LOCAL_LABEL(JIT_MemSet_0xd0):
- tbz x2,#0,LOCAL_LABEL(JIT_MemSet_0xd8)
- strb w9,[x0]
-LOCAL_LABEL(JIT_MemSet_0xd8):
+ b LOCAL_LABEL(JIT_MemSet_0xa8)
+LOCAL_LABEL(JIT_MemSet_0xa0):
+ stp x8, x8, [x0], #16
+LOCAL_LABEL(JIT_MemSet_0xa8):
+ subs x2, x2, #16
+ b.ge LOCAL_LABEL(JIT_MemSet_0xa0)
+
+LOCAL_LABEL(JIT_MemSet_0xb0):
+ tbz x2, #3, LOCAL_LABEL(JIT_MemSet_0xb4)
+ str x8, [x0], #8
+LOCAL_LABEL(JIT_MemSet_0xb4):
+ tbz x2, #2, LOCAL_LABEL(JIT_MemSet_0xc8)
+ str w8, [x0], #4
+LOCAL_LABEL(JIT_MemSet_0xc8):
+ tbz x2, #1, LOCAL_LABEL(JIT_MemSet_0xdc)
+ strh w8, [x0], #2
+LOCAL_LABEL(JIT_MemSet_0xdc):
+ tbz x2, #0, LOCAL_LABEL(JIT_MemSet_0xe8)
+ strb w8, [x0]
+LOCAL_LABEL(JIT_MemSet_0xe8):
ret lr
LEAF_END_MARKED JIT_MemSet, _TEXT
@@ -157,60 +225,74 @@ LEAF_END_MARKED JIT_MemSet, _TEXT
// // If not aligned then make it 8-byte aligned
// if(((uintptr_t)dst&0x7) != 0)
// {
-// if(((uintptr_t)dst&0x3) == 0)
+// // Calculate alignment we can do without exceeding count
+// // Use math to avoid introducing more unpredictable branches
+// // Due to inherent mod in lsr, ~7 is used instead of ~0 to handle count == 0
+// Note logic will fail if count >= (1 << 61). But this exceeds max physical memory for arm64
+// uint8_t align = (dst & 0x7) & (~uint64_t(7) >> (countLeadingZeros(count) mod 64))
+//
+// if(align&0x1)
// {
-// *(UINT*)dst = *(UINT*)src;
-// dst = (UINT*)dst + 1;
-// src = (UINT*)src + 1;
-// count-=4;
+// *(uint8_t*)dst = *(uint8_t*)src;
+// dst = (uint8_t*)dst + 1;
+// src = (uint8_t*)src + 1;
+// count-=1;
// }
-// else if(((uintptr_t)dst&0x1) == 0)
+//
+// if(align&0x2)
// {
-// while(count > 0 && ((uintptr_t)dst&0x7) != 0)
-// {
-// *(short*)dst = *(short*)src;
-// dst = (short*)dst + 1;
-// src = (short*)src + 1;
-// count-=2;
-// }
+// *(uint16_t*)dst = *(uint16_t*)src;
+// dst = (uint16_t*)dst + 1;
+// src = (uint16_t*)src + 1;
+// count-=2;
// }
-// else
+//
+// if(align&0x4)
// {
-// while(count > 0 && ((uintptr_t)dst&0x7) != 0)
-// {
-// *(char*)dst = *(char*)src;
-// dst = (char*)dst + 1;
-// src = (char*)src + 1;
-// count--;
-// }
+// *(uint32_t*)dst = *(uint32_t*)src;
+// dst = (uint32_t*)dst + 1;
+// src = (uint32_t*)src + 1;
+// count-=4;
// }
// }
//
-// while(count >= 8)
+// count-=16;
+//
+// while(count >= 0)
+// {
+// *(uint64_t*)dst = *(uint64_t*)src;
+// dst = (uint64_t*)dst + 1;
+// src = (uint64_t*)src + 1;
+// *(uint64_t*)dst = *(uint64_t*)src;
+// dst = (uint64_t*)dst + 1;
+// src = (uint64_t*)src + 1;
+// count-=16;
+// }
+//
+// if(count & 8)
// {
-// *(uintptr_t*)dst = *(uintptr_t*)src;
-// dst = (uintptr_t*)dst + 1;
-// src = (uintptr_t*)src + 1;
-// count-=8;
+// *(uint64_t*)dst = *(uint64_t*)src;
+// dst = (uint64_t*)dst + 1;
+// src = (uint64_t*)src + 1;
// }
//
// if(count & 4)
// {
-// *(UINT*)dst = *(UINT*)src;
-// dst = (UINT*)dst + 1;
-// src = (UINT*)src + 1;
+// *(uint32_t*)dst = *(uint32_t*)src;
+// dst = (uint32_t*)dst + 1;
+// src = (uint32_t*)src + 1;
// }
//
// if(count & 2)
// {
-// *(short*)dst = *(short*)src;
-// dst = (short*)dst + 1;
-// src = (short*)src + 1;
+// *(uint16_t*)dst = *(uint16_t*)src;
+// dst = (uint16_t*)dst + 1;
+// src = (uint16_t*)src + 1;
// }
//
// if(count & 1)
// {
-// *(char*)dst = *(char*)src;
+// *(uint8_t*)dst = *(uint8_t*)src;
// }
//
//
@@ -218,69 +300,49 @@ LEAF_END_MARKED JIT_MemSet, _TEXT
// Assembly code corresponding to above C++ method.
// See comments above for JIT_MemSet method
LEAF_ENTRY JIT_MemCpy, _TEXT
- and x8,x0,#7
- cbz x8,LOCAL_LABEL(JIT_MemCpy_0x80)
- and x8,x0,#3
- cbnz x8,LOCAL_LABEL(JIT_MemCpy_0x2c)
- ldr w8,[x1]
- str w8,[x0]
- add x0,x0,#4
- add x1,x1,#4
- mov x8,#-4
- add x2,x2,x8
- b LOCAL_LABEL(JIT_MemCpy_0x80)
+ ands x3, x0, #7
+ movn x4, #7
+ clz x5, x2
+ b.eq LOCAL_LABEL(JIT_MemCpy_0xa8)
+ lsr x4, x4, x5
+ and x3, x3, x4
+ tbz x3, #0, LOCAL_LABEL(JIT_MemCpy_0x2c)
+ ldrsb w8, [x1], #1
+ strb w8, [x0], #1
+ sub x2, x2, #1
LOCAL_LABEL(JIT_MemCpy_0x2c):
- cbz x2,LOCAL_LABEL(JIT_MemCpy_0x80)
- tbnz x0,#0,LOCAL_LABEL(JIT_MemCpy_0x5c)
-LOCAL_LABEL(JIT_MemCpy_0x34):
- and x8,x0,#7
- cbz x8,LOCAL_LABEL(JIT_MemCpy_0x80)
- ldrsh w8,[x1]
- strh w8,[x0]
- add x0,x0,#2
- add x1,x1,#2
- mov x8,#-2
- add x2,x2,x8
- cbnz x2,LOCAL_LABEL(JIT_MemCpy_0x34)
- b LOCAL_LABEL(JIT_MemCpy_0x80)
+ tbz x3, #1, LOCAL_LABEL(JIT_MemCpy_0x5c)
+ ldrsh w8, [x1], #2
+ strh w8, [x0], #2
+ sub x2, x2, #2
LOCAL_LABEL(JIT_MemCpy_0x5c):
- and x8,x0,#7
- cbz x8,LOCAL_LABEL(JIT_MemCpy_0x80)
- ldrsb w8,[x1]
- strb w8,[x0]
- add x0,x0,#1
- add x1,x1,#1
- mov x8,#-1
- add x2,x2,x8
- cbnz x2,LOCAL_LABEL(JIT_MemCpy_0x5c)
-LOCAL_LABEL(JIT_MemCpy_0x80):
- cmp x2,#8
- blo LOCAL_LABEL(JIT_MemCpy_0xb4)
- lsr x9,x2,#3
- mov x8,#-8
- madd x2,x9,x8,x2
+ tbz x3, #2, LOCAL_LABEL(JIT_MemCpy_0xa8)
+ ldr w8, [x1], #4
+ str w8, [x0], #4
+ sub x2, x2, #4
+ b LOCAL_LABEL(JIT_MemCpy_0xa8)
LOCAL_LABEL(JIT_MemCpy_0xa0):
- ldr x8,[x1],#8
- str x8,[x0],#8
- mov x8,#-1
- add x9,x9,x8
- cbnz x9,LOCAL_LABEL(JIT_MemCpy_0xa0)
+ ldp x8, x9, [x1], #16
+ stp x8, x9, [x0], #16
+LOCAL_LABEL(JIT_MemCpy_0xa8):
+ subs x2, x2, #16
+ b.ge LOCAL_LABEL(JIT_MemCpy_0xa0)
+LOCAL_LABEL(JIT_MemCpy_0xb0):
+ tbz x2, #3, LOCAL_LABEL(JIT_MemCpy_0xb4)
+ ldr x8, [x1], #8
+ str x8, [x0], #8
LOCAL_LABEL(JIT_MemCpy_0xb4):
- tbz x2,#2,LOCAL_LABEL(JIT_MemCpy_0xc8)
- ldr w8,[x1]
- str w8,[x0]
- add x0,x0,#4
- add x1,x1,#4
+ tbz x2, #2, LOCAL_LABEL(JIT_MemCpy_0xc8)
+ ldr w8, [x1], #4
+ str w8, [x0], #4
LOCAL_LABEL(JIT_MemCpy_0xc8):
- tbz x2,#1,LOCAL_LABEL(JIT_MemCpy_0xdc)
- ldrsh w8,[x1]
- strh w8,[x0]
- add x0,x0,#2
- add x1,x1,#2
+ tbz x2, #1, LOCAL_LABEL(JIT_MemCpy_0xdc)
+ ldrsh w8, [x1], #2
+ strh w8, [x0], #2
LOCAL_LABEL(JIT_MemCpy_0xdc):
- tbz x2,#0,LOCAL_LABEL(JIT_MemCpy_0xe8)
- ldrsb w8,[x1]
- strb w8,[x0]
+ tbz x2, #0, LOCAL_LABEL(JIT_MemCpy_0xe8)
+ ldrsb w8, [x1]
+ strb w8, [x0]
LOCAL_LABEL(JIT_MemCpy_0xe8):
ret lr
LEAF_END_MARKED JIT_MemCpy, _TEXT
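[Editor's note] Both rewritten routines key off DCZID_EL0, which EL0 code can read directly. Below is a small hedged sketch (GCC/Clang inline asm, aarch64 only) of how the two fields used above are extracted: BS in bits [3:0] is log2 of the zeroing block size in 4-byte words (hence the "4ULL << DCZID_EL0.BS" in the pseudocode; 64 bytes on most cores), and DZP in bit 4 prohibits DC ZVA when set.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    #if defined(__aarch64__)
        uint64_t dczid;
        __asm__ volatile("mrs %0, dczid_el0" : "=r"(dczid));

        int prohibited = (int)((dczid >> 4) & 1);    // DCZID_EL0.DZP
        uint64_t zva_bytes = 4ull << (dczid & 0xF);  // 4 << DCZID_EL0.BS

        printf("dc zva %s, block size %llu bytes\n",
               prohibited ? "prohibited" : "allowed",
               (unsigned long long)zva_bytes);
    #endif
        return 0;
    }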
diff --git a/src/vm/arm64/stubs.cpp b/src/vm/arm64/stubs.cpp
index 0c7eb4dfba..40d274959f 100644
--- a/src/vm/arm64/stubs.cpp
+++ b/src/vm/arm64/stubs.cpp
@@ -1317,6 +1317,19 @@ void StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
return;
}
+#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+void SwitchToWriteWatchBarrier(bool isRuntimeSuspended)
+{
+ return;
+}
+
+void SwitchToNonWriteWatchBarrier(bool isRuntimeSuspended)
+{
+ return;
+}
+#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+
+
#ifdef DACCESS_COMPILE
BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDesc)
{
diff --git a/src/vm/assemblyname.cpp b/src/vm/assemblyname.cpp
index 90e2a467e1..6c8367e506 100644
--- a/src/vm/assemblyname.cpp
+++ b/src/vm/assemblyname.cpp
@@ -57,20 +57,11 @@ FCIMPL1(Object*, AssemblyNameNative::GetFileInformation, StringObject* filenameU
SString sFileName(gc.filename->GetBuffer());
PEImageHolder pImage = PEImage::OpenImage(sFileName, MDInternalImport_NoCache);
- EX_TRY
- {
- // Allow AssemblyLoadContext.GetAssemblyName for native images on CoreCLR
- if (pImage->HasNTHeaders() && pImage->HasCorHeader() && pImage->HasNativeHeader())
- pImage->VerifyIsNIAssembly();
- else
- pImage->VerifyIsAssembly();
- }
- EX_CATCH
- {
- Exception *ex = GET_EXCEPTION();
- EEFileLoadException::Throw(sFileName,ex->GetHR(),ex);
- }
- EX_END_CATCH_UNREACHABLE;
+ // Allow AssemblyLoadContext.GetAssemblyName for native images on CoreCLR
+ if (pImage->HasNTHeaders() && pImage->HasCorHeader() && pImage->HasNativeHeader())
+ pImage->VerifyIsNIAssembly();
+ else
+ pImage->VerifyIsAssembly();
SString sUrl = sFileName;
PEAssembly::PathToUrl(sUrl);
diff --git a/src/vm/assemblynative.cpp b/src/vm/assemblynative.cpp
index b9079ec06a..e9bcc2366b 100644
--- a/src/vm/assemblynative.cpp
+++ b/src/vm/assemblynative.cpp
@@ -326,110 +326,6 @@ Assembly* AssemblyNative::LoadFromBuffer(BOOL fForIntrospection, const BYTE* pAs
return pAssembly;
}
-FCIMPL6(Object*, AssemblyNative::LoadImage, U1Array* PEByteArrayUNSAFE,
- U1Array* SymByteArrayUNSAFE, Object* securityUNSAFE,
- StackCrawlMark* stackMark, CLR_BOOL fForIntrospection, SecurityContextSource securityContextSource)
-{
- FCALL_CONTRACT;
-
- struct _gc
- {
- U1ARRAYREF PEByteArray;
- U1ARRAYREF SymByteArray;
- OBJECTREF security;
- OBJECTREF Throwable;
- OBJECTREF refRetVal;
- } gc;
-
- gc.PEByteArray = (U1ARRAYREF) PEByteArrayUNSAFE;
- gc.SymByteArray = (U1ARRAYREF) SymByteArrayUNSAFE;
- gc.security = (OBJECTREF) securityUNSAFE;
- gc.Throwable = NULL;
- gc.refRetVal = NULL;
-
- HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
-
-
- if (gc.PEByteArray == NULL)
- COMPlusThrow(kArgumentNullException, W("ArgumentNull_Array"));
-
- NewArrayHolder<BYTE> pbSyms;
- DWORD cbSyms = 0;
-
-#ifdef DEBUGGING_SUPPORTED
- // If we were given symbols, save a copy of them.
- // the debugger, load them now).
- if (gc.SymByteArray != NULL)
- {
- Security::CopyByteArrayToEncoding(&gc.SymByteArray,
- &pbSyms, &cbSyms);
-
- }
-#endif // DEBUGGING_SUPPORTED
-
- Assembly* pAssembly = NULL;
- // Pin byte array for loading
- {
- Wrapper<OBJECTHANDLE, DoNothing, DestroyPinningHandle> handle(
- GetAppDomain()->CreatePinningHandle(gc.PEByteArray));
-
- const BYTE *pbImage = gc.PEByteArray->GetDirectConstPointerToNonObjectElements();
- DWORD cbImage = gc.PEByteArray->GetNumComponents();
- pAssembly = LoadFromBuffer(fForIntrospection, pbImage, cbImage, pbSyms, cbSyms, stackMark, OBJECTREFToObject(gc.security), securityContextSource);
- }
-
-
- if (pAssembly != NULL)
- gc.refRetVal = pAssembly->GetExposedObject();
-
- HELPER_METHOD_FRAME_END();
-
- return OBJECTREFToObject(gc.refRetVal);
-}
-FCIMPLEND
-
-FCIMPL2(Object*, AssemblyNative::LoadFile, StringObject* pathUNSAFE, Object* securityUNSAFE)
-{
- FCALL_CONTRACT;
-
- struct _gc {
- OBJECTREF refRetVal;
- OBJECTREF refSecurity;
- STRINGREF strPath;
- } gc;
-
- gc.refRetVal = NULL;
- gc.refSecurity = ObjectToOBJECTREF(securityUNSAFE);
- gc.strPath = ObjectToSTRINGREF(pathUNSAFE);
-
- HELPER_METHOD_FRAME_BEGIN_RET_PROTECT(gc);
-
- if(CorHost2::IsLoadFromBlocked())
- COMPlusThrow(kFileLoadException, FUSION_E_LOADFROM_BLOCKED);
-
- if (pathUNSAFE == NULL)
- COMPlusThrow(kArgumentNullException, W("ArgumentNull_Path"));
-
- StackSString path;
- gc.strPath->GetSString(path);
-
- Assembly *pAssembly = AssemblySpec::LoadAssembly(path);
-
- LOG((LF_CLASSLOADER,
- LL_INFO100,
- "\tLoaded assembly from a file\n"));
-
-
- if (pAssembly != NULL)
- gc.refRetVal = (ASSEMBLYREF) pAssembly->GetExposedObject();
-
- HELPER_METHOD_FRAME_END();
-
- return OBJECTREFToObject(gc.refRetVal);
-}
-FCIMPLEND
-
-
/* static */
Assembly* AssemblyNative::LoadFromPEImage(ICLRPrivBinder* pBinderContext, PEImage *pILImage, PEImage *pNIImage)
{
@@ -536,42 +432,6 @@ Assembly* AssemblyNative::LoadFromPEImage(ICLRPrivBinder* pBinderContext, PEImag
RETURN pLoadedAssembly;
}
-/* static */
-void QCALLTYPE AssemblyNative::GetLoadedAssembliesInternal(QCall::ObjectHandleOnStack assemblies)
-{
- QCALL_CONTRACT;
-
- BEGIN_QCALL;
-
- MethodTable * pAssemblyClass = MscorlibBinder::GetClass(CLASS__ASSEMBLY);
-
- PTR_AppDomain pCurDomain = GetAppDomain();
-
- SetSHash<PTR_DomainAssembly> assemblySet;
- pCurDomain->GetCacheAssemblyList(assemblySet);
- size_t nArrayElems = assemblySet.GetCount();
- PTRARRAYREF AsmArray = NULL;
-
- GCX_COOP();
-
- GCPROTECT_BEGIN(AsmArray);
- AsmArray = (PTRARRAYREF) AllocateObjectArray( (DWORD)nArrayElems, pAssemblyClass);
- for(auto it = assemblySet.Begin(); it != assemblySet.End(); it++)
- {
- PTR_DomainAssembly assem = *it;
- OBJECTREF o = (OBJECTREF)assem->GetExposedAssemblyObject();
- _ASSERTE(o != NULL);
- _ASSERTE(nArrayElems > 0);
- AsmArray->SetAt(--nArrayElems, o);
- }
-
- assemblies.Set(AsmArray);
-
- GCPROTECT_END();
-
- END_QCALL;
-}
-
/* static */
void QCALLTYPE AssemblyNative::LoadFromPath(INT_PTR ptrNativeAssemblyLoadContext, LPCWSTR pwzILPath, LPCWSTR pwzNIPath, QCall::ObjectHandleOnStack retLoadedAssembly)
{
diff --git a/src/vm/assemblynative.hpp b/src/vm/assemblynative.hpp
index ece8100e95..71e8b51181 100644
--- a/src/vm/assemblynative.hpp
+++ b/src/vm/assemblynative.hpp
@@ -46,10 +46,6 @@ public:
static
void QCALLTYPE GetExecutingAssembly(QCall::StackCrawlMarkHandle stackMark, QCall::ObjectHandleOnStack retAssembly);
- static FCDECL2(Object*, LoadFile, StringObject* pathUNSAFE,
- Object* securityUNSAFE);
- static FCDECL6(Object*, LoadImage, U1Array* PEByteArrayUNSAFE, U1Array* SymByteArrayUNSAFE, Object* securityUNSAFE, StackCrawlMark* stackMark, CLR_BOOL fForIntrospection, SecurityContextSource securityContextSource);
-
static FCDECL10(Object*, Load, AssemblyNameBaseObject* assemblyNameUNSAFE,
StringObject* codeBaseUNSAFE,
Object* securityUNSAFE,
@@ -61,9 +57,6 @@ public:
CLR_BOOL fSuppressSecurityChecks,
INT_PTR ptrLoadContextBinder);
- static FCDECL1(FC_BOOL_RET, IsFrameworkAssembly, AssemblyNameBaseObject* refAssemblyNameUNSAFE);
- static FCDECL1(FC_BOOL_RET, IsNewPortableAssembly, AssemblyNameBaseObject* refAssemblyNameUNSAFE);
-
//
// instance FCALLs
//
@@ -208,7 +201,6 @@ public:
static BOOL QCALLTYPE OverrideDefaultAssemblyLoadContextForCurrentDomain(INT_PTR ptrNativeAssemblyLoadContext);
static BOOL QCALLTYPE CanUseAppPathAssemblyLoadContextInCurrentDomain();
static void QCALLTYPE LoadFromPath(INT_PTR ptrNativeAssemblyLoadContext, LPCWSTR pwzILPath, LPCWSTR pwzNIPath, QCall::ObjectHandleOnStack retLoadedAssembly);
- static void QCALLTYPE GetLoadedAssembliesInternal(QCall::ObjectHandleOnStack assemblies);
static INT_PTR QCALLTYPE InternalLoadUnmanagedDllFromPath(LPCWSTR unmanagedLibraryPath);
static void QCALLTYPE LoadFromStream(INT_PTR ptrNativeAssemblyLoadContext, INT_PTR ptrAssemblyArray, INT32 cbAssemblyArrayLength, INT_PTR ptrSymbolArray, INT32 cbSymbolArrayLength, QCall::ObjectHandleOnStack retLoadedAssembly);
static Assembly* LoadFromPEImage(ICLRPrivBinder* pBinderContext, PEImage *pILImage, PEImage *pNIImage);
diff --git a/src/vm/assemblyspec.cpp b/src/vm/assemblyspec.cpp
index e278c002fc..e5952c24d2 100644
--- a/src/vm/assemblyspec.cpp
+++ b/src/vm/assemblyspec.cpp
@@ -785,15 +785,6 @@ ICLRPrivBinder* AssemblySpec::GetBindingContextFromParentAssembly(AppDomain *pDo
// ICLRPrivAssembly implements ICLRPrivBinder and thus, "is a" binder in a manner of semantics.
pParentAssemblyBinder = pParentPEAssembly->GetBindingContext();
- if (pParentAssemblyBinder == NULL)
- {
- if (pParentPEAssembly->IsDynamic())
- {
- // If the parent assembly is dynamically generated, then use its fallback load context
- // as the binder.
- pParentAssemblyBinder = pParentPEAssembly->GetFallbackLoadContextBinder();
- }
- }
}
if (GetPreferFallbackLoadContextBinder())
@@ -811,13 +802,12 @@ ICLRPrivBinder* AssemblySpec::GetBindingContextFromParentAssembly(AppDomain *pDo
//
// 1) Domain Neutral assembly
// 2) Entrypoint assembly
- // 3) RefEmitted assembly
- // 4) AssemblyLoadContext.LoadFromAssemblyName
+ // 3) AssemblyLoadContext.LoadFromAssemblyName
//
// For (1) and (2), we will need to bind against the DefaultContext binder (aka TPA Binder). This happens
// below if we do not find the parent assembly binder.
//
- // For (3) and (4), fetch the fallback load context binder reference.
+ // For (3), fetch the fallback load context binder reference.
pParentAssemblyBinder = GetFallbackLoadContextBinderForRequestingAssembly();
}
diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
index 5de7114eb1..6a1eb62959 100644
--- a/src/vm/ceeload.cpp
+++ b/src/vm/ceeload.cpp
@@ -9550,7 +9550,7 @@ void Module::Arrange(DataImage *image)
else if (TypeFromToken(token) == mdtFieldDef)
{
FieldDesc *pFD = LookupFieldDef(token);
- if (pFD && pFD->IsILOnlyRVAField())
+ if (pFD && pFD->IsRVA())
{
if (entry->flags & (1 << RVAFieldData))
{
@@ -12770,6 +12770,11 @@ void Module::LogTokenAccess(mdToken token, SectionFormat format, ULONG flagnum)
if (!m_nativeImageProfiling)
return;
+ if (flagnum >= CORBBTPROF_TOKEN_MAX_NUM_FLAGS)
+ {
+ return;
+ }
+
mdToken rid = RidFromToken(token);
CorTokenType tkType = (CorTokenType) TypeFromToken(token);
SectionFormat tkKind = (SectionFormat) (tkType >> 24);
@@ -12798,8 +12803,9 @@ void Module::LogTokenAccess(mdToken token, SectionFormat format, ULONG flagnum)
else if (tkKind == (SectionFormat) (ibcMethodSpec >> 24))
tkKind = IbcMethodSpecSection;
+ _ASSERTE(tkKind >= 0);
_ASSERTE(tkKind < SectionFormatCount);
- if (tkKind >= SectionFormatCount)
+ if (tkKind < 0 || tkKind >= SectionFormatCount)
{
return;
}
diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp
index 9cce46d2f0..c03c40134b 100644
--- a/src/vm/ceemain.cpp
+++ b/src/vm/ceemain.cpp
@@ -693,6 +693,11 @@ void EEStartupHelper(COINITIEE fFlags)
InitThreadManager();
STRESS_LOG0(LF_STARTUP, LL_ALWAYS, "Returned successfully from InitThreadManager");
+#ifdef FEATURE_PERFTRACING
+ // Initialize the event pipe.
+ EventPipe::Initialize();
+#endif // FEATURE_PERFTRACING
+
#ifdef FEATURE_EVENT_TRACE
// Initialize event tracing early so we can trace CLR startup time events.
InitializeEventTracing();
@@ -1036,8 +1041,7 @@ void EEStartupHelper(COINITIEE fFlags)
#endif
#ifdef FEATURE_PERFTRACING
- // Initialize the event pipe and start it if requested.
- EventPipe::Initialize();
+ // Start the event pipe if requested.
EventPipe::EnableOnStartup();
#endif // FEATURE_PERFTRACING
@@ -1576,6 +1580,11 @@ void STDMETHODCALLTYPE EEShutDownHelper(BOOL fIsDllUnloading)
ETW::EnumerationLog::ProcessShutdown();
}
+#ifdef FEATURE_PERFTRACING
+ // Shutdown the event pipe.
+ EventPipe::Shutdown();
+#endif // FEATURE_PERFTRACING
+
#if defined(FEATURE_COMINTEROP)
// Get the current thread.
Thread * pThisThread = GetThread();
@@ -1708,11 +1717,6 @@ void STDMETHODCALLTYPE EEShutDownHelper(BOOL fIsDllUnloading)
PerfMap::Destroy();
#endif
-#ifdef FEATURE_PERFTRACING
- // Shutdown the event pipe.
- EventPipe::Shutdown();
-#endif // FEATURE_PERFTRACING
-
#ifdef FEATURE_PREJIT
{
// If we're doing basic block profiling, we need to write the log files to disk.
diff --git a/src/vm/class.h b/src/vm/class.h
index 6c74377012..e3ec0ba166 100644
--- a/src/vm/class.h
+++ b/src/vm/class.h
@@ -2502,6 +2502,17 @@ inline PCODE GetPreStubEntryPoint()
return GetEEFuncEntryPoint(ThePreStub);
}
+#if defined(HAS_COMPACT_ENTRYPOINTS) && defined(_TARGET_ARM_)
+
+EXTERN_C void STDCALL ThePreStubCompactARM();
+
+inline PCODE GetPreStubCompactARMEntryPoint()
+{
+ return GetEEFuncEntryPoint(ThePreStubCompactARM);
+}
+
+#endif // defined(HAS_COMPACT_ENTRYPOINTS) && defined(_TARGET_ARM_)
+
PCODE TheUMThunkPreStub();
PCODE TheVarargNDirectStub(BOOL hasRetBuffArg);
diff --git a/src/vm/clrex.cpp b/src/vm/clrex.cpp
index 1c1501e54d..ba040b7e81 100644
--- a/src/vm/clrex.cpp
+++ b/src/vm/clrex.cpp
@@ -2002,18 +2002,7 @@ void DECLSPEC_NORETURN EEFileLoadException::Throw(LPCWSTR path, HRESULT hr, Exce
if (hr == E_OUTOFMEMORY)
COMPlusThrowOM();
-#ifndef CROSSGEN_COMPILE
- // Remove path - location must be hidden for security purposes
-
- LPCWSTR pStart = wcsrchr(path, '\\');
- if (pStart != NULL)
- pStart++;
- else
- pStart = path;
-#else
- LPCWSTR pStart = path;
-#endif
- EX_THROW_WITH_INNER(EEFileLoadException, (StackSString(pStart), hr), pInnerException);
+ EX_THROW_WITH_INNER(EEFileLoadException, (StackSString(path), hr), pInnerException);
}
/* static */
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index a30e70e7fa..d934b824f6 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -1219,7 +1219,6 @@ EEJitManager::EEJitManager()
#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
m_JITCompilerOther = NULL;
#endif
- m_fLegacyJitUsed = FALSE;
#ifdef ALLOW_SXS_JIT
m_alternateJit = NULL;
@@ -1371,8 +1370,8 @@ void EEJitManager::SetCpuInfo()
enum JIT_LOAD_JIT_ID
{
JIT_LOAD_MAIN = 500, // The "main" JIT. Normally, this is named "clrjit.dll". Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish from garbage, in process dumps.
- JIT_LOAD_LEGACY, // The "legacy" JIT. Normally, this is named "compatjit.dll". This applies to AMD64 on Windows desktop, or x86 on Windows .NET Core.
- JIT_LOAD_ALTJIT // An "altjit". By default, named "protojit.dll". Used both internally, as well as externally for JIT CTP builds.
+ // 501 is JIT_LOAD_LEGACY on some platforms; please do not reuse this value.
+ JIT_LOAD_ALTJIT = 502 // An "altjit". By default, named "protojit.dll". Used both internally, as well as externally for JIT CTP builds.
};
enum JIT_LOAD_STATUS
@@ -1628,80 +1627,6 @@ BOOL EEJitManager::LoadJIT()
// Set as a courtesy to code:CorCompileGetRuntimeDll
s_ngenCompilerDll = m_JITCompiler;
-
-#if defined(_TARGET_X86_)
- // If COMPlus_UseLegacyJit=1, then we fall back to compatjit.dll.
- //
- // This fallback mechanism was introduced for Visual Studio "14" Preview, when JIT64 (the legacy JIT) was replaced with
- // RyuJIT. It was desired to provide a fallback mechanism in case comptibility problems (or other bugs)
- // were discovered by customers. Setting this COMPLUS variable to 1 does not affect NGEN: existing NGEN images continue
- // to be used, and all subsequent NGEN compilations continue to use the new JIT.
- //
- // If this is a compilation process, then we don't allow specifying a fallback JIT. This is a case where, when NGEN'ing,
- // we sometimes need to JIT some things (such as when we are NGEN'ing mscorlib). In that case, we want to use exactly
- // the same JIT as NGEN uses. And NGEN doesn't follow the COMPlus_UseLegacyJit=1 switch -- it always uses clrjit.dll.
- //
- // Note that we always load and initialize the default JIT. This is to handle cases where obfuscators rely on
- // LoadLibrary("clrjit.dll") returning the module handle of the JIT, and then they call GetProcAddress("getJit") to get
- // the EE-JIT interface. They also do this without also calling sxsJitStartup()!
- //
- // In addition, for reasons related to servicing, we only use RyuJIT when the registry value UseRyuJIT (type DWORD), under
- // key HKLM\SOFTWARE\Microsoft\.NETFramework, is set to 1. Otherwise, we fall back to JIT64. Note that if this value
- // is set, we also must use JIT64 for all NGEN compilations as well.
- //
- // See the document "RyuJIT Compatibility Fallback Specification.docx" for details.
- //
- // For .NET Core 1.2, RyuJIT for x86 is the primary jit (clrjit.dll) and JIT32 for x86 is the fallback, legacy JIT (compatjit.dll).
- // Thus, the COMPlus_useLegacyJit=1 mechanism has been enabled for x86 CoreCLR. This scenario does not have the UseRyuJIT
- // registry key, nor the AppX binder mode.
-
- bool fUseRyuJit = true;
-
- if ((!IsCompilationProcess() || !fUseRyuJit) && // Use RyuJIT for all NGEN, unless we're falling back to JIT64 for everything.
- (newJitCompiler != nullptr)) // the main JIT must successfully load before we try loading the fallback JIT
- {
- BOOL fUsingCompatJit = FALSE;
-
- if (!fUseRyuJit)
- {
- fUsingCompatJit = TRUE;
- }
-
- if (!fUsingCompatJit)
- {
- DWORD useLegacyJit = Configuration::GetKnobBooleanValue(W("System.JIT.UseWindowsX86CoreLegacyJit"), CLRConfig::EXTERNAL_UseWindowsX86CoreLegacyJit);
- if (useLegacyJit == 1)
- {
- fUsingCompatJit = TRUE;
- }
- }
-
-
- if (fUsingCompatJit)
- {
- // Now, load the compat jit and initialize it.
-
- LPCWSTR pwzJitName = MAKEDLLNAME_W(W("compatjit"));
-
- // Note: if the compatjit fails to load, we ignore it, and continue to use the main JIT for
- // everything. You can imagine a policy where if the user requests the compatjit, and we fail
- // to load it, that we fail noisily. We don't do that currently.
- ICorJitCompiler* fallbackICorJitCompiler;
- g_JitLoadData.jld_id = JIT_LOAD_LEGACY;
- LoadAndInitializeJIT(pwzJitName, &m_JITCompilerOther, &fallbackICorJitCompiler, &g_JitLoadData);
- if (fallbackICorJitCompiler != nullptr)
- {
- // Tell the main JIT to fall back to the "fallback" JIT compiler, in case some
- // obfuscator tries to directly call the main JIT's getJit() function.
- newJitCompiler->setRealJit(fallbackICorJitCompiler);
-
- // Now, the compat JIT will be used.
- m_fLegacyJitUsed = TRUE;
- }
- }
- }
-#endif // (defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)) || (defined(_TARGET_X86_) )
-
#endif // !FEATURE_MERGE_JIT_AND_ENGINE
#ifdef ALLOW_SXS_JIT
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
index 9d7ed4d62f..cca5f5e2d2 100644
--- a/src/vm/codeman.h
+++ b/src/vm/codeman.h
@@ -1201,12 +1201,6 @@ public:
HINSTANCE m_JITCompilerOther; // Stores the handle of the legacy JIT, if one is loaded.
#endif
- // TRUE if the legacy/compat JIT was loaded successfully and will be used.
- // This is available in all builds so if COMPlus_RequireLegacyJit=1 is set in a test,
- // the test will fail in any build where the legacy JIT is not loaded, even if legacy
- // fallback is not available in that build. This prevents unexpected silent successes.
- BOOL m_fLegacyJitUsed;
-
#ifdef ALLOW_SXS_JIT
//put these at the end so that we don't mess up the offsets in the DAC.
ICorJitCompiler * m_alternateJit;
diff --git a/src/vm/comdependenthandle.cpp b/src/vm/comdependenthandle.cpp
index 6535a804ae..4763e4833a 100644
--- a/src/vm/comdependenthandle.cpp
+++ b/src/vm/comdependenthandle.cpp
@@ -17,22 +17,22 @@
-FCIMPL3(VOID, DependentHandle::nInitialize, Object *_primary, Object *_secondary, OBJECTHANDLE *outHandle)
+FCIMPL2(OBJECTHANDLE, DependentHandle::nInitialize, Object *_primary, Object *_secondary)
{
FCALL_CONTRACT;
- _ASSERTE(outHandle != NULL && *outHandle == NULL); // Multiple initializations disallowed
-
OBJECTREF primary(_primary);
OBJECTREF secondary(_secondary);
+ OBJECTHANDLE result = NULL;
- HELPER_METHOD_FRAME_BEGIN_NOPOLL();
+ HELPER_METHOD_FRAME_BEGIN_RET_NOPOLL();
// Create the handle.
- *outHandle = GetAppDomain()->CreateDependentHandle(primary, secondary);
+ result = GetAppDomain()->CreateDependentHandle(primary, secondary);
HELPER_METHOD_FRAME_END_POLL();
+ return result;
}
FCIMPLEND
@@ -55,22 +55,28 @@ FCIMPLEND
-FCIMPL2(VOID, DependentHandle::nGetPrimary, OBJECTHANDLE handle, Object **outPrimary)
+FCIMPL1(Object*, DependentHandle::nGetPrimary, OBJECTHANDLE handle)
{
FCALL_CONTRACT;
- _ASSERTE(handle != NULL && outPrimary != NULL);
- *outPrimary = OBJECTREFToObject(ObjectFromHandle(handle));
+ FCUnique(0x54);
+ _ASSERTE(handle != NULL);
+ return OBJECTREFToObject(ObjectFromHandle(handle));
}
FCIMPLEND
-FCIMPL3(VOID, DependentHandle::nGetPrimaryAndSecondary, OBJECTHANDLE handle, Object **outPrimary, Object **outSecondary)
+FCIMPL2(Object*, DependentHandle::nGetPrimaryAndSecondary, OBJECTHANDLE handle, Object **outSecondary)
{
FCALL_CONTRACT;
- _ASSERTE(handle != NULL && outPrimary != NULL && outSecondary != NULL);
- *outPrimary = OBJECTREFToObject(ObjectFromHandle(handle));
- *outSecondary = OBJECTREFToObject(GetDependentHandleSecondary(handle));
+ _ASSERTE(handle != NULL && outSecondary != NULL);
+
+ OBJECTREF primary = ObjectFromHandle(handle);
+
+ // Secondary is tracked only if primary is non-null
+ *outSecondary = (primary != NULL) ? OBJECTREFToObject(GetDependentHandleSecondary(handle)) : NULL;
+
+ return OBJECTREFToObject(primary);
}
FCIMPLEND
diff --git a/src/vm/comdependenthandle.h b/src/vm/comdependenthandle.h
index 7192a4bbc3..edc9a6b30f 100644
--- a/src/vm/comdependenthandle.h
+++ b/src/vm/comdependenthandle.h
@@ -41,12 +41,12 @@
class DependentHandle
{
public:
- static FCDECL3(VOID, nInitialize, Object *primary, Object *secondary, OBJECTHANDLE *outHandle);
- static FCDECL2(VOID, nGetPrimary, OBJECTHANDLE handle, Object **outPrimary);
- static FCDECL3(VOID, nGetPrimaryAndSecondary, OBJECTHANDLE handle, Object **outPrimary, Object **outSecondary);
- static FCDECL1(VOID, nFree, OBJECTHANDLE handle);
- static FCDECL2(VOID, nSetPrimary, OBJECTHANDLE handle, Object *primary);
- static FCDECL2(VOID, nSetSecondary, OBJECTHANDLE handle, Object *secondary);
+ static FCDECL2(OBJECTHANDLE, nInitialize, Object *primary, Object *secondary);
+ static FCDECL1(Object *, nGetPrimary, OBJECTHANDLE handle);
+ static FCDECL2(Object *, nGetPrimaryAndSecondary, OBJECTHANDLE handle, Object **outSecondary);
+ static FCDECL1(VOID, nFree, OBJECTHANDLE handle);
+ static FCDECL2(VOID, nSetPrimary, OBJECTHANDLE handle, Object *primary);
+ static FCDECL2(VOID, nSetSecondary, OBJECTHANDLE handle, Object *secondary);
};
#endif
diff --git a/src/vm/compile.cpp b/src/vm/compile.cpp
index 91615851c7..9727430398 100644
--- a/src/vm/compile.cpp
+++ b/src/vm/compile.cpp
@@ -652,22 +652,6 @@ HRESULT CEECompileInfo::SetCompilationTarget(CORINFO_ASSEMBLY_HANDLE assembl
}
}
-#ifdef FEATURE_READYTORUN_COMPILER
- if (IsReadyToRunCompilation() && !pModule->IsILOnly())
- {
- GetSvcLogger()->Printf(LogLevel_Error, W("Error: /readytorun not supported for mixed mode assemblies\n"));
- return E_FAIL;
- }
-#endif
-
-#ifdef FEATURE_READYTORUN_COMPILER
- if (IsReadyToRunCompilation() && !pModule->IsILOnly())
- {
- GetSvcLogger()->Printf(LogLevel_Error, W("Error: /readytorun not supported for mixed mode assemblies\n"));
- return E_FAIL;
- }
-#endif
-
return S_OK;
}
@@ -1723,6 +1707,12 @@ mdToken CEECompileInfo::TryEncodeMethodAsToken(
if (!pReferencingModule->IsInCurrentVersionBubble())
return mdTokenNil;
+ // If this is a MemberRef with TypeSpec, we might come to here because we resolved the method
+ // into a non-generic base class in the same version bubble. However, since we don't have the
+ // proper type context during ExternalMethodFixupWorker, we can't really encode using token
+ if (pResolvedToken->pTypeSpec != NULL)
+ return mdTokenNil;
+
unsigned methodToken = pResolvedToken->token;
switch (TypeFromToken(methodToken))
@@ -6803,7 +6793,7 @@ void CEEPreloader::GetRVAFieldData(mdFieldDef fd, PVOID * ppData, DWORD * pcbSiz
if (pFD == NULL)
ThrowHR(COR_E_TYPELOAD);
- _ASSERTE(pFD->IsILOnlyRVAField());
+ _ASSERTE(pFD->IsRVA());
UINT size = pFD->LoadSize();
diff --git a/src/vm/coreassemblyspec.cpp b/src/vm/coreassemblyspec.cpp
index 7cb1f56315..1d3567e769 100644
--- a/src/vm/coreassemblyspec.cpp
+++ b/src/vm/coreassemblyspec.cpp
@@ -275,8 +275,32 @@ STDAPI BinderAcquirePEImage(LPCWSTR wszAssemblyPath,
STDAPI BinderHasNativeHeader(PEImage *pPEImage, BOOL* result)
{
- *result = pPEImage->HasNativeHeader();
- return S_OK;
+ HRESULT hr = S_OK;
+
+ _ASSERTE(pPEImage != NULL);
+ _ASSERTE(result != NULL);
+
+ EX_TRY
+ {
+ *result = pPEImage->HasNativeHeader();
+ }
+ EX_CATCH_HRESULT(hr);
+
+ if (FAILED(hr))
+ {
+ *result = false;
+
+#if defined(FEATURE_PAL)
+ // PAL_LOADLoadPEFile may fail while loading IL masquerading as NI.
+ // This will result in a ThrowHR(E_FAIL). Suppress the error.
+ if(hr == E_FAIL)
+ {
+ hr = S_OK;
+ }
+#endif // defined(FEATURE_PAL)
+ }
+
+ return hr;
}
STDAPI BinderAcquireImport(PEImage *pPEImage,
diff --git a/src/vm/corhost.cpp b/src/vm/corhost.cpp
index fd27a7a4e7..3f53de2acb 100644
--- a/src/vm/corhost.cpp
+++ b/src/vm/corhost.cpp
@@ -2570,7 +2570,7 @@ HRESULT CCLRErrorReportingManager::BucketParamsCache::SetAt(BucketParameterIndex
{
LIMITED_METHOD_CONTRACT;
- if (index >= InvalidBucketParamIndex)
+ if (index < 0 || index >= InvalidBucketParamIndex)
{
_ASSERTE(!"bad bucket parameter index");
return E_INVALIDARG;
diff --git a/src/vm/coverage.cpp b/src/vm/coverage.cpp
deleted file mode 100644
index 2a5e5ffdfb..0000000000
--- a/src/vm/coverage.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-
-#include "common.h"
-
-#include "coverage.h"
-
-
-//
-// This is part of the runtime test teams Code Coverge Tools. Due to the special nature of MSCORLIB.dll
-// We have to work around several issues (Like the initilization of the Secutiry Manager) to be able to get
-// Code coverage on mscorlib.dll
-//
-
-FCIMPL1(unsigned __int64, COMCoverage::nativeCoverBlock, INT32 id)
-{
- FCALL_CONTRACT;
-
- unsigned __int64 retVal = 0;
- HELPER_METHOD_FRAME_BEGIN_RET_0();
-
- HMODULE ilcovnat = 0;
- if (id == 1)
- {
- ilcovnat = CLRLoadLibrary(W("Ilcovnat.dll"));
-
- if (ilcovnat)
- {
- retVal = (unsigned __int64)GetProcAddress(ilcovnat, "CoverBlockNative");
- }
- }
- else if (id == 2)
- {
- ilcovnat = CLRLoadLibrary(W("coverage.dll"));
-
- if (ilcovnat)
- {
- retVal = (unsigned __int64)GetProcAddress(ilcovnat, "CoverageRegisterBinaryWithStruct");
- }
- }
- else if (id == 3)
- {
- ilcovnat = CLRLoadLibrary(W("Ilcovnat.dll"));
- if (ilcovnat)
- {
- retVal = (unsigned __int64)GetProcAddress(ilcovnat, "CoverMonRegisterMscorlib");
- }
- }
-
- HELPER_METHOD_FRAME_END();
- return retVal;
-}
-FCIMPLEND
diff --git a/src/vm/coverage.h b/src/vm/coverage.h
deleted file mode 100644
index 9be2cc1c1e..0000000000
--- a/src/vm/coverage.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-
-#ifndef _COVERAGE_H_
-#define _COVERAGE_H_
-
-// Please see coverage.cpp for info on this file
-class COMCoverage
-{
-public:
- //typedef struct
- //{
- // DECLARE_ECALL_I4_ARG(INT32, id);
- //} _CoverageArgs;
- static FCDECL1(unsigned __int64, nativeCoverBlock, INT32 id);
-};
-#endif // _COVERAGE_H_
diff --git a/src/vm/dllimport.cpp b/src/vm/dllimport.cpp
index 3abe9cbf46..b58ac56b29 100644
--- a/src/vm/dllimport.cpp
+++ b/src/vm/dllimport.cpp
@@ -5938,19 +5938,7 @@ HMODULE NDirect::LoadLibraryModuleViaHost(NDirectMethodDesc * pMD, AppDomain* pD
// The Binding Context can be null or an overridden TPA context
if (pBindingContext == NULL)
{
- pBindingContext = nullptr;
-
- // If the assembly does not have a binder associated with it explicitly, then check if it is
- // a dynamic assembly, or not, since they can have a fallback load context associated with them.
- if (pManifestFile->IsDynamic())
- {
- pBindingContext = pManifestFile->GetFallbackLoadContextBinder();
- }
- }
-
- // If we do not have any binder associated, then return to the default resolution mechanism.
- if (pBindingContext == nullptr)
- {
+ // If we do not have any binder associated, then return to the default resolution mechanism.
return NULL;
}
diff --git a/src/vm/dwreport.cpp b/src/vm/dwreport.cpp
index b95c59ff8d..57d67e7c22 100644
--- a/src/vm/dwreport.cpp
+++ b/src/vm/dwreport.cpp
@@ -1526,30 +1526,28 @@ BOOL RunWatson(
return false;
}
+ {
+ BOOL ret = WszCreateProcess(watsonAppName,
+ watsonCommandLine,
+ NULL,
+ NULL,
+ TRUE,
+ NULL,
+ NULL,
+ NULL,
+ &startupInfo,
+ &processInformation);
+
+ if (FALSE == ret)
{
- BOOL ret = WszCreateProcess(watsonAppName,
- watsonCommandLine,
- NULL,
- NULL,
- TRUE,
- NULL,
- NULL,
- NULL,
- &startupInfo,
- &processInformation);
-
- if (FALSE == ret)
- {
- //
- // Watson failed to start up.
- //
- // This can happen if e.g. Watson wasn't installed on the machine.
- //
- return E_FAIL;
-
- }
-
+ //
+ // Watson failed to start up.
+ //
+ // This can happen if e.g. Watson wasn't installed on the machine.
+ //
+ return FALSE;
}
+ }
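[Editor's note] Beyond the re-indentation, this hunk fixes a return-type mismatch: the old code returned E_FAIL from RunWatson, whose return type is BOOL. E_FAIL is nonzero, so a caller testing the result would read the failure as success. A minimal illustration with hypothetical names:

    #include <stdio.h>

    typedef int BOOL;
    #define FALSE  0
    #define E_FAIL ((int)0x80004005)   /* nonzero, hence "true" as a BOOL */

    static BOOL Launch(void) { return E_FAIL; }   /* the old, buggy contract */

    int main(void)
    {
        if (Launch())
            printf("failure path, but the caller sees success\n");
        return 0;
    }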
diff --git a/src/vm/ecalllist.h b/src/vm/ecalllist.h
index 5c3e5f82ba..39056a9198 100644
--- a/src/vm/ecalllist.h
+++ b/src/vm/ecalllist.h
@@ -519,6 +519,7 @@ FCFuncStart(gAppDomainFuncs)
#if FEATURE_COMINTEROP
FCFuncElement("nSetDisableInterfaceCache", AppDomainNative::SetDisableInterfaceCache)
#endif // FEATURE_COMINTEROP
+ FCFuncElement("nGetAssemblies", AppDomainNative::GetAssemblies)
FCFuncElement("nCreateContext", AppDomainNative::CreateContext)
FCFuncElement("GetId", AppDomainNative::GetId)
FCFuncElement("GetOrInternString", AppDomainNative::GetOrInternString)
@@ -593,7 +594,6 @@ FCFuncStart(gAssemblyFuncs)
QCFuncElement("GetVersion", AssemblyNative::GetVersion)
FCFuncElement("FCallIsDynamic", AssemblyNative::IsDynamic)
FCFuncElement("_nLoad", AssemblyNative::Load)
- FCFuncElement("nLoadImage", AssemblyNative::LoadImage)
QCFuncElement("GetType", AssemblyNative::GetType)
QCFuncElement("GetManifestResourceInfo", AssemblyNative::GetManifestResourceInfo)
QCFuncElement("GetModules", AssemblyNative::GetModules)
@@ -617,7 +617,6 @@ FCFuncEnd()
FCFuncStart(gAssemblyLoadContextFuncs)
QCFuncElement("InitializeAssemblyLoadContext", AssemblyNative::InitializeAssemblyLoadContext)
QCFuncElement("LoadFromPath", AssemblyNative::LoadFromPath)
- QCFuncElement("GetLoadedAssembliesInternal", AssemblyNative::GetLoadedAssembliesInternal)
QCFuncElement("InternalLoadUnmanagedDllFromPath", AssemblyNative::InternalLoadUnmanagedDllFromPath)
QCFuncElement("CanUseAppPathAssemblyLoadContextInCurrentDomain", AssemblyNative::CanUseAppPathAssemblyLoadContextInCurrentDomain)
QCFuncElement("LoadFromStream", AssemblyNative::LoadFromStream)
@@ -1237,10 +1236,6 @@ FCFuncStart(gStubHelperFuncs)
#endif //FEATURE_STUBS_AS_IL
FCFuncEnd()
-FCFuncStart(gCoverageFuncs)
- FCUnreferenced FCFuncElement("nativeCoverBlock", COMCoverage::nativeCoverBlock)
-FCFuncEnd()
-
FCFuncStart(gGCHandleFuncs)
FCFuncElement("InternalAlloc", MarshalNative::GCHandleInternalAlloc)
FCFuncElement("InternalFree", MarshalNative::GCHandleInternalFree)
@@ -1276,6 +1271,17 @@ FCFuncStart(gEventLogger)
FCFuncEnd()
#endif // defined(FEATURE_EVENTSOURCE_XPLAT)
+#ifdef FEATURE_PERFTRACING
+FCFuncStart(gEventPipeInternalFuncs)
+ QCFuncElement("Enable", EventPipeInternal::Enable)
+ QCFuncElement("Disable", EventPipeInternal::Disable)
+ QCFuncElement("CreateProvider", EventPipeInternal::CreateProvider)
+ QCFuncElement("DefineEvent", EventPipeInternal::DefineEvent)
+ QCFuncElement("DeleteProvider", EventPipeInternal::DeleteProvider)
+ QCFuncElement("WriteEvent", EventPipeInternal::WriteEvent)
+FCFuncEnd()
+#endif // FEATURE_PERFTRACING
+
#ifdef FEATURE_COMINTEROP
FCFuncStart(gRuntimeClassFuncs)
FCFuncElement("GetRedirectedGetHashCodeMD", ComObject::GetRedirectedGetHashCodeMD)
@@ -1379,6 +1385,9 @@ FCClassElement("Environment", "System", gEnvironmentFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("EventArgsMarshaler", "System.StubHelpers", gEventArgsMarshalerFuncs)
#endif // FEATURE_COMINTEROP
+#if defined(FEATURE_PERFTRACING)
+FCClassElement("EventPipeInternal", "System.Diagnostics.Tracing", gEventPipeInternalFuncs)
+#endif // FEATURE_PERFTRACING
FCClassElement("Exception", "System", gExceptionFuncs)
FCClassElement("FileLoadException", "System.IO", gFileLoadExceptionFuncs)
FCClassElement("FormatterServices", "System.Runtime.Serialization", gSerializationFuncs)
@@ -1388,12 +1397,7 @@ FCClassElement("GCHandle", "System.Runtime.InteropServices", gGCHandleFuncs)
FCClassElement("IEnumerable", "System.Collections", gStdMngIEnumerableFuncs)
FCClassElement("IEnumerator", "System.Collections", gStdMngIEnumeratorFuncs)
FCClassElement("IExpando", "System.Runtime.InteropServices.Expando", gStdMngIExpandoFuncs)
-#endif // FEATURE_COMINTEROP
-FCClassElement("ILCover", "System.Coverage", gCoverageFuncs)
-#ifdef FEATURE_COMINTEROP
FCClassElement("IReflect", "System.Reflection", gStdMngIReflectFuncs)
-#endif
-#ifdef FEATURE_COMINTEROP
FCClassElement("InterfaceMarshaler", "System.StubHelpers", gInterfaceMarshalerFuncs)
#endif
FCClassElement("Interlocked", "System.Threading", gInterlockedFuncs)
diff --git a/src/vm/eeconfig.cpp b/src/vm/eeconfig.cpp
index 812d1df671..81f3957951 100644
--- a/src/vm/eeconfig.cpp
+++ b/src/vm/eeconfig.cpp
@@ -327,10 +327,6 @@ HRESULT EEConfig::Init()
iRequireZaps = REQUIRE_ZAPS_NONE;
-#ifdef _TARGET_AMD64_
- pDisableNativeImageLoadList = NULL;
-#endif
-
// new loader behavior switches
m_fDeveloperInstallation = false;
@@ -489,11 +485,6 @@ HRESULT EEConfig::Cleanup()
delete pForbidZapsExcludeList;
#endif
-#ifdef _TARGET_AMD64_
- if (pDisableNativeImageLoadList)
- delete pDisableNativeImageLoadList;
-#endif
-
#ifdef FEATURE_COMINTEROP
if (pszLogCCWRefCountChange)
delete [] pszLogCCWRefCountChange;
@@ -996,16 +987,6 @@ HRESULT EEConfig::sync()
}
#endif
-#ifdef _TARGET_AMD64_
- if (!IsCompilationProcess())
- {
- NewArrayHolder<WCHAR> wszDisableNativeImageLoadList;
- IfFailRet(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_DisableNativeImageLoadList, &wszDisableNativeImageLoadList));
- if (wszDisableNativeImageLoadList)
- pDisableNativeImageLoadList = new AssemblyNamesList(wszDisableNativeImageLoadList);
- }
-#endif
-
#ifdef FEATURE_LOADER_OPTIMIZATION
dwSharePolicy = GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_LoaderOptimization, dwSharePolicy);
#endif
@@ -1455,18 +1436,6 @@ bool EEConfig::ExcludeReadyToRun(LPCUTF8 assemblyName) const
return false;
}
-#ifdef _TARGET_AMD64_
-bool EEConfig::DisableNativeImageLoad(LPCUTF8 assemblyName) const
-{
- LIMITED_METHOD_CONTRACT;
-
- if (pDisableNativeImageLoadList != NULL && pDisableNativeImageLoadList->IsInList(assemblyName))
- return true;
-
- return false;
-}
-#endif
-
/**************************************************************/
#ifdef _DEBUG
/**************************************************************/
diff --git a/src/vm/eeconfig.h b/src/vm/eeconfig.h
index e97385e3da..ae23f74755 100644
--- a/src/vm/eeconfig.h
+++ b/src/vm/eeconfig.h
@@ -758,11 +758,6 @@ public:
bool ForbidZap(LPCUTF8 assemblyName) const;
#endif
bool ExcludeReadyToRun(LPCUTF8 assemblyName) const;
-
-#ifdef _TARGET_AMD64_
- bool DisableNativeImageLoad(LPCUTF8 assemblyName) const;
- bool IsDisableNativeImageLoadListNonEmpty() const { LIMITED_METHOD_CONTRACT; return (pDisableNativeImageLoadList != NULL); }
-#endif
LPCWSTR ZapSet() const { LIMITED_METHOD_CONTRACT; return pZapSet; }
@@ -1122,16 +1117,6 @@ private: //----------------------------------------------------------------
AssemblyNamesList * pForbidZapsExcludeList;
#endif
-#ifdef _TARGET_AMD64_
- // Assemblies for which we will not load a native image. This is from the COMPlus_DisableNativeImageLoadList
- // variable / reg key. It performs the same function as the config file key "<disableNativeImageLoad>" (except
- // that is it just a list of assembly names, which the config file key can specify full assembly identities).
- // This was added to support COMPlus_UseLegacyJit, to support the rollout of RyuJIT to replace JIT64, where
- // the user can cause the CLR to fall back to JIT64 for JITting but not for NGEN. This allows the user to
- // force JITting for a specified list of NGEN assemblies.
- AssemblyNamesList * pDisableNativeImageLoadList;
-#endif
-
LPCWSTR pZapSet;
bool fNgenBindOptimizeNonGac;
diff --git a/src/vm/eepolicy.cpp b/src/vm/eepolicy.cpp
index be5fa00ee5..6bd389f579 100644
--- a/src/vm/eepolicy.cpp
+++ b/src/vm/eepolicy.cpp
@@ -1105,6 +1105,98 @@ void EEPolicy::HandleExitProcess(ShutdownCompleteAction sca)
HandleExitProcessHelper(action, 0, sca);
}
+StackWalkAction LogCallstackForLogCallback(
+ CrawlFrame *pCF, // Current frame in the stack walk
+ VOID* pData // Caller's private data
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ SO_INTOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ SmallStackSString *pWordAt = ((SmallStackSString*)pData);
+
+ MethodDesc *pMD = pCF->GetFunction();
+ _ASSERTE(pMD != NULL);
+
+ StackSString str;
+ str = *pWordAt;
+
+ TypeString::AppendMethodInternal(str, pMD, TypeString::FormatNamespace|TypeString::FormatFullInst|TypeString::FormatSignature);
+ PrintToStdErrW(str.GetUnicode());
+ PrintToStdErrA("\n");
+
+ return SWA_CONTINUE;
+}
+
+//---------------------------------------------------------------------------------------
+//
+// A worker to log the managed call stack of the current thread to stderr.
+//
+// Arguments:
+// None
+//
+// Return Value:
+// None
+//
+inline void LogCallstackForLogWorker()
+{
+ Thread* pThread = GetThread();
+ _ASSERTE (pThread);
+
+ SmallStackSString WordAt;
+
+ if (!WordAt.LoadResource(CCompRC::Optional, IDS_ER_WORDAT))
+ {
+ WordAt.Set(W(" at"));
+ }
+ else
+ {
+ WordAt.Insert(WordAt.Begin(), W(" "));
+ }
+ WordAt += W(" ");
+
+ pThread->StackWalkFrames(&LogCallstackForLogCallback, &WordAt, QUICKUNWIND | FUNCTIONSONLY);
+}
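The worker/callback pair above follows the standard StackWalkFrames shape: a per-frame callback plus an opaque data pointer threaded through the walk. A standalone sketch of the same shape (Frame, WalkFrames, and the method names are hypothetical stand-ins, not runtime types):

#include <cstdio>
#include <string>
#include <vector>

enum StackWalkAction { SWA_CONTINUE, SWA_ABORT };
struct Frame { std::string methodName; };
typedef StackWalkAction (*WalkCallback)(const Frame &frame, void *pData);

// Visits frames top-down, handing each one plus the caller's private data
// to the callback -- the same contract as Thread::StackWalkFrames above.
static void WalkFrames(const std::vector<Frame> &frames, WalkCallback pfn, void *pData)
{
    for (const Frame &f : frames)
    {
        if (pfn(f, pData) != SWA_CONTINUE)
            break;
    }
}

// Mirrors LogCallstackForLogCallback: prepend the " at " prefix and print.
static StackWalkAction PrintFrame(const Frame &frame, void *pData)
{
    const std::string *pPrefix = static_cast<const std::string *>(pData);
    fprintf(stderr, "%s%s\n", pPrefix->c_str(), frame.methodName.c_str());
    return SWA_CONTINUE;
}

int main()
{
    std::vector<Frame> frames = { { "Program.Main()" }, { "Helper.Run()" } };
    std::string prefix = "   at ";
    WalkFrames(frames, PrintFrame, &prefix);  // prints "   at Program.Main()" ...
    return 0;
}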
+
+//---------------------------------------------------------------------------------------
+//
+// Log a FailFast exception and the managed call stack to stderr.
+//
+// Arguments:
+// pszMessage - Message supplied by the FailFast caller
+// pExceptionInfo - Exception information
+//
+// Return Value:
+// None
+//
+inline void DoLogForFailFastException(LPCWSTR pszMessage, PEXCEPTION_POINTERS pExceptionInfo)
+{
+ WRAPPER_NO_CONTRACT;
+
+ Thread *pThread = GetThread();
+ EX_TRY
+ {
+ PrintToStdErrA("FailFast: ");
+ PrintToStdErrW((WCHAR*)pszMessage);
+ PrintToStdErrA("\n");
+
+ if (pThread)
+ {
+ PrintToStdErrA("\n");
+ LogCallstackForLogWorker();
+ }
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
+}
+
//
// Log an error to the event log if possible, then throw up a dialog box.
//
@@ -1117,6 +1209,12 @@ void EEPolicy::LogFatalError(UINT exitCode, UINT_PTR address, LPCWSTR pszMessage
_ASSERTE(pExceptionInfo != NULL);
+ // Log FailFast exception to StdErr
+ if (exitCode == (UINT)COR_E_FAILFAST)
+ {
+ DoLogForFailFastException(pszMessage, pExceptionInfo);
+ }
+
if(ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context, FailFast))
{
// Fire an ETW FailFast event
diff --git a/src/vm/eetoprofinterfaceimpl.cpp b/src/vm/eetoprofinterfaceimpl.cpp
index ca35717bec..75f4a02af2 100644
--- a/src/vm/eetoprofinterfaceimpl.cpp
+++ b/src/vm/eetoprofinterfaceimpl.cpp
@@ -415,6 +415,7 @@ EEToProfInterfaceImpl::EEToProfInterfaceImpl() :
m_pCallback6(NULL),
m_pCallback7(NULL),
m_pCallback8(NULL),
+ m_pCallback9(NULL),
m_hmodProfilerDLL(NULL),
m_fLoadedViaAttach(FALSE),
m_pProfToEE(NULL),
@@ -665,21 +666,25 @@ HRESULT EEToProfInterfaceImpl::CreateProfiler(
m_hmodProfilerDLL = hmodProfilerDLL.Extract();
hmodProfilerDLL = NULL;
- // The profiler may optionally support ICorProfilerCallback3,4,5,6,7,8. Let's check.
+ // The profiler may optionally support ICorProfilerCallback3,4,5,6,7,8,9. Let's check.
- ReleaseHolder<ICorProfilerCallback8> pCallback8;
+ ReleaseHolder<ICorProfilerCallback9> pCallback9;
hr = m_pCallback2->QueryInterface(
- IID_ICorProfilerCallback8,
- (LPVOID *)&pCallback8);
- if (SUCCEEDED(hr) && (pCallback8 != NULL))
+ IID_ICorProfilerCallback9,
+ (LPVOID *)&pCallback9);
+ if (SUCCEEDED(hr) && (pCallback9 != NULL))
{
// Nifty. Transfer ownership to this class
- _ASSERTE(m_pCallback8 == NULL);
- m_pCallback8 = pCallback8.Extract();
- pCallback8 = NULL;
+ _ASSERTE(m_pCallback9 == NULL);
+ m_pCallback9 = pCallback9.Extract();
+ pCallback9 = NULL;
- // And while we're at it, we must now also have an ICorProfilerCallback3,4,5,6,7
+ // And while we're at it, we must now also have an ICorProfilerCallback3,4,5,6,7,8
// due to inheritance relationship of the interfaces
+ _ASSERTE(m_pCallback8 == NULL);
+ m_pCallback8 = static_cast<ICorProfilerCallback8 *>(m_pCallback9);
+ m_pCallback8->AddRef();
+
_ASSERTE(m_pCallback7 == NULL);
m_pCallback7 = static_cast<ICorProfilerCallback7 *>(m_pCallback8);
m_pCallback7->AddRef();
@@ -701,6 +706,44 @@ HRESULT EEToProfInterfaceImpl::CreateProfiler(
m_pCallback3->AddRef();
}
+ if (m_pCallback8 == NULL)
+ {
+ ReleaseHolder<ICorProfilerCallback8> pCallback8;
+ hr = m_pCallback2->QueryInterface(
+ IID_ICorProfilerCallback8,
+ (LPVOID *)&pCallback8);
+ if (SUCCEEDED(hr) && (pCallback8 != NULL))
+ {
+ // Nifty. Transfer ownership to this class
+ _ASSERTE(m_pCallback8 == NULL);
+ m_pCallback8 = pCallback8.Extract();
+ pCallback8 = NULL;
+
+ // And while we're at it, we must now also have an ICorProfilerCallback3,4,5,6,7
+ // due to inheritance relationship of the interfaces
+
+ _ASSERTE(m_pCallback7 == NULL);
+ m_pCallback7 = static_cast<ICorProfilerCallback7 *>(m_pCallback8);
+ m_pCallback7->AddRef();
+
+ _ASSERTE(m_pCallback6 == NULL);
+ m_pCallback6 = static_cast<ICorProfilerCallback6 *>(m_pCallback7);
+ m_pCallback6->AddRef();
+
+ _ASSERTE(m_pCallback5 == NULL);
+ m_pCallback5 = static_cast<ICorProfilerCallback5 *>(m_pCallback6);
+ m_pCallback5->AddRef();
+
+ _ASSERTE(m_pCallback4 == NULL);
+ m_pCallback4 = static_cast<ICorProfilerCallback4 *>(m_pCallback5);
+ m_pCallback4->AddRef();
+
+ _ASSERTE(m_pCallback3 == NULL);
+ m_pCallback3 = static_cast<ICorProfilerCallback3 *>(m_pCallback4);
+ m_pCallback3->AddRef();
+ }
+ }
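The pattern here probes for the newest interface first and falls back one version at a time; because each callback interface derives from its predecessor, a successful probe for version N yields every older pointer by upcasting plus AddRef. A standalone analogy of that probe order, using dynamic_cast in place of QueryInterface (V7/V8/V9 are hypothetical stand-ins):

#include <cstdio>

// Hypothetical stand-ins mirroring ICorProfilerCallback9 : 8 : 7 inheritance.
struct V7 { virtual ~V7() {} };
struct V8 : V7 { };
struct V9 : V8 { };

int main()
{
    V8 profiler;            // a profiler that implements only V8
    V7 *pBase = &profiler;

    // Probe newest-first, like the QueryInterface chain above.
    if (V9 *p9 = dynamic_cast<V9 *>(pBase))
    {
        // V9 found: V8/V7 pointers come for free by upcasting (AddRef in COM).
        V8 *p8 = static_cast<V8 *>(p9);
        (void)p8;
        printf("supports V9\n");
    }
    else if (V8 *p8 = dynamic_cast<V8 *>(pBase))
    {
        (void)p8;
        printf("supports V8 but not V9\n");  // this branch runs here
    }
    return 0;
}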
+
if (m_pCallback7 == NULL)
{
ReleaseHolder<ICorProfilerCallback7> pCallback7;
@@ -3218,6 +3261,38 @@ HRESULT EEToProfInterfaceImpl::JITCompilationStarted(FunctionID functionId,
}
}
+HRESULT EEToProfInterfaceImpl::DynamicMethodUnloaded(FunctionID functionId)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE; // RuntimeMethodHandle::Destroy (the caller) moves from QCALL to GCX_COOP
+ CAN_TAKE_LOCK;
+ SO_TOLERANT;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: DynamicMethodUnloaded 0x%p.\n",
+ functionId));
+
+ _ASSERTE(functionId);
+
+ if (m_pCallback9 == NULL)
+ {
+ return S_OK;
+ }
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback9->DynamicMethodUnloaded(functionId);
+ }
+}
+
HRESULT EEToProfInterfaceImpl::DynamicMethodJITCompilationFinished(FunctionID functionId,
HRESULT hrStatus,
BOOL fIsSafeToBlock)
diff --git a/src/vm/eetoprofinterfaceimpl.h b/src/vm/eetoprofinterfaceimpl.h
index 76797fcc26..63d1cadfbb 100644
--- a/src/vm/eetoprofinterfaceimpl.h
+++ b/src/vm/eetoprofinterfaceimpl.h
@@ -181,7 +181,10 @@ public:
FunctionID functionId,
HRESULT hrStatus,
BOOL fIsSafeToBlock);
-
+
+ HRESULT DynamicMethodUnloaded(
+ FunctionID functionId);
+
HRESULT JITCachedFunctionSearchStarted(
/* [in] */ FunctionID functionId,
/* [out] */ BOOL * pbUseCachedFunction);
@@ -541,7 +544,7 @@ private:
// Pointer to the profiler's implementation of the callback interface(s).
// Profilers MUST support ICorProfilerCallback2.
- // Profilers MAY optionally support ICorProfilerCallback3,4,5,6,7,8
+ // Profilers MAY optionally support ICorProfilerCallback3,4,5,6,7,8,9
ICorProfilerCallback2 * m_pCallback2;
ICorProfilerCallback3 * m_pCallback3;
ICorProfilerCallback4 * m_pCallback4;
@@ -549,6 +552,8 @@ private:
ICorProfilerCallback6 * m_pCallback6;
ICorProfilerCallback7 * m_pCallback7;
ICorProfilerCallback8 * m_pCallback8;
+ ICorProfilerCallback9 * m_pCallback9;
+
HMODULE m_hmodProfilerDLL;
BOOL m_fLoadedViaAttach;
diff --git a/src/vm/eventpipe.cpp b/src/vm/eventpipe.cpp
index 98d382ea17..e041615efc 100644
--- a/src/vm/eventpipe.cpp
+++ b/src/vm/eventpipe.cpp
@@ -4,21 +4,58 @@
#include "common.h"
#include "eventpipe.h"
+#include "eventpipebuffermanager.h"
+#include "eventpipeconfiguration.h"
+#include "eventpipeevent.h"
+#include "eventpipefile.h"
+#include "eventpipeprovider.h"
#include "eventpipejsonfile.h"
#include "sampleprofiler.h"
-CrstStatic EventPipe::s_initCrst;
+#ifdef FEATURE_PAL
+#include "pal.h"
+#endif // FEATURE_PAL
+
+#ifdef FEATURE_PERFTRACING
+
+CrstStatic EventPipe::s_configCrst;
bool EventPipe::s_tracingInitialized = false;
-bool EventPipe::s_tracingEnabled = false;
+EventPipeConfiguration* EventPipe::s_pConfig = NULL;
+EventPipeBufferManager* EventPipe::s_pBufferManager = NULL;
+EventPipeFile* EventPipe::s_pFile = NULL;
+#ifdef _DEBUG
+EventPipeFile* EventPipe::s_pSyncFile = NULL;
EventPipeJsonFile* EventPipe::s_pJsonFile = NULL;
+#endif // _DEBUG
+
+#ifdef FEATURE_PAL
+// This function is auto-generated from /src/scripts/genEventPipe.py
+extern "C" void InitProvidersAndEvents();
+#endif
+
void EventPipe::Initialize()
{
STANDARD_VM_CONTRACT;
- s_tracingInitialized = s_initCrst.InitNoThrow(
+ s_tracingInitialized = s_configCrst.InitNoThrow(
CrstEventPipe,
- (CrstFlags)(CRST_TAKEN_DURING_SHUTDOWN));
+ (CrstFlags)(CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
+
+ s_pConfig = new EventPipeConfiguration();
+ s_pConfig->Initialize();
+
+ s_pBufferManager = new EventPipeBufferManager();
+
+#ifdef FEATURE_PAL
+ // This calls into auto-generated code to initialize the runtime providers
+ // and events so that the EventPipe configuration lock isn't taken at runtime
+ InitProvidersAndEvents();
+#endif
}
void EventPipe::EnableOnStartup()
@@ -32,9 +69,15 @@ void EventPipe::EnableOnStartup()
CONTRACTL_END;
// Test COMPLUS variable to enable tracing at start-up.
- if(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PerformanceTracing) != 0)
+ if((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PerformanceTracing) & 1) == 1)
{
- Enable();
+ SString outputPath;
+ outputPath.Printf("Process-%d.netperf", GetCurrentProcessId());
+ Enable(
+ outputPath.GetUnicode(),
+ 1024 /* 1 GB circular buffer */,
+ NULL /* pProviders */,
+ 0 /* numProviders */);
}
}
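Note that the configuration value is now decoded as a bit field rather than a plain boolean: bit 0 enables tracing at startup, and bit 1 (tested in the _DEBUG block inside Enable below) additionally emits the synchronous and JSON files. A small standalone sketch of the decoding:

#include <cstdio>

int main()
{
    // COMPlus_PerformanceTracing values 0..3, decoded as in the diff above.
    for (unsigned int value = 0; value <= 3; value++)
    {
        bool trace     = (value & 1) == 1;  // bit 0: enable tracing at startup
        bool syncFiles = (value & 2) == 2;  // bit 1: debug-only sync/JSON output
        printf("PerformanceTracing=%u -> trace=%d syncFiles=%d\n",
               value, trace, syncFiles);
    }
    return 0;
}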
@@ -49,9 +92,24 @@ void EventPipe::Shutdown()
CONTRACTL_END;
Disable();
+
+ if(s_pConfig != NULL)
+ {
+ delete(s_pConfig);
+ s_pConfig = NULL;
+ }
+ if(s_pBufferManager != NULL)
+ {
+ delete(s_pBufferManager);
+ s_pBufferManager = NULL;
+ }
}
-void EventPipe::Enable()
+void EventPipe::Enable(
+ LPCWSTR strOutputPath,
+ uint circularBufferSizeInMB,
+ EventPipeProviderConfiguration *pProviders,
+ int numProviders)
{
CONTRACTL
{
@@ -61,22 +119,38 @@ void EventPipe::Enable()
}
CONTRACTL_END;
- if(!s_tracingInitialized)
+ // If tracing is not initialized or is already enabled, bail here.
+ if(!s_tracingInitialized || s_pConfig->Enabled())
{
return;
}
- // Take the lock and enable tracing.
- CrstHolder _crst(&s_initCrst);
- s_tracingEnabled = true;
- if(CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PerformanceTracing) == 2)
+ // Take the lock before enabling tracing.
+ CrstHolder _crst(GetLock());
+
+ // Create the event pipe file.
+ SString eventPipeFileOutputPath(strOutputPath);
+ s_pFile = new EventPipeFile(eventPipeFileOutputPath);
+
+#ifdef _DEBUG
+ if((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PerformanceTracing) & 2) == 2)
{
- // File placed in current working directory.
+ // Create a synchronous file.
+ SString eventPipeSyncFileOutputPath;
+ eventPipeSyncFileOutputPath.Printf("Process-%d.sync.netperf", GetCurrentProcessId());
+ s_pSyncFile = new EventPipeFile(eventPipeSyncFileOutputPath);
+
+ // Create a JSON file.
SString outputFilePath;
outputFilePath.Printf("Process-%d.PerfView.json", GetCurrentProcessId());
s_pJsonFile = new EventPipeJsonFile(outputFilePath);
}
+#endif // _DEBUG
+ // Enable tracing.
+ s_pConfig->Enable(circularBufferSizeInMB, pProviders, numProviders);
+
+ // Enable the sample profiler
SampleProfiler::Enable();
}
@@ -90,76 +164,246 @@ void EventPipe::Disable()
}
CONTRACTL_END;
- CrstHolder _crst(&s_initCrst);
- s_tracingEnabled = false;
- SampleProfiler::Disable();
+ // Don't block GC during clean-up.
+ GCX_PREEMP();
+
+ // Take the lock before disabling tracing.
+ CrstHolder _crst(GetLock());
+
+ if(s_pConfig->Enabled())
+ {
+ // Disable the profiler.
+ SampleProfiler::Disable();
+
+ // Disable tracing.
+ s_pConfig->Disable();
+
+ // Flush all write buffers to make sure that all threads see the change.
+ FlushProcessWriteBuffers();
+
+ // Write to the file.
+ LARGE_INTEGER disableTimeStamp;
+ QueryPerformanceCounter(&disableTimeStamp);
+ s_pBufferManager->WriteAllBuffersToFile(s_pFile, disableTimeStamp);
+
+ // Before closing the file, do rundown.
+ s_pConfig->EnableRundown();
+
+ // Ask the runtime to emit rundown events.
+ if(g_fEEStarted && !g_fEEShutDown)
+ {
+ ETW::EnumerationLog::EndRundown();
+ }
+
+ // Disable the event pipe now that rundown is complete.
+ s_pConfig->Disable();
+
+ if(s_pFile != NULL)
+ {
+ delete(s_pFile);
+ s_pFile = NULL;
+ }
+#ifdef _DEBUG
+ if(s_pSyncFile != NULL)
+ {
+ delete(s_pSyncFile);
+ s_pSyncFile = NULL;
+ }
+ if(s_pJsonFile != NULL)
+ {
+ delete(s_pJsonFile);
+ s_pJsonFile = NULL;
+ }
+#endif // _DEBUG
+
+ // De-allocate buffers.
+ s_pBufferManager->DeAllocateBuffers();
+
+ // Delete deferred providers.
+ // Providers can't be deleted during tracing because they may be needed when serializing the file.
+ s_pConfig->DeleteDeferredProviders();
+ }
+}
- if(s_pJsonFile != NULL)
+bool EventPipe::Enabled()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ bool enabled = false;
+ if(s_pConfig != NULL)
{
- delete(s_pJsonFile);
- s_pJsonFile = NULL;
+ enabled = s_pConfig->Enabled();
}
+
+ return enabled;
}
-bool EventPipe::EventEnabled(GUID& providerID, INT64 keyword)
+EventPipeProvider* EventPipe::CreateProvider(const GUID &providerID, EventPipeCallback pCallbackFunction, void *pCallbackData)
{
CONTRACTL
{
- NOTHROW;
- GC_NOTRIGGER;
+ THROWS;
+ GC_TRIGGERS;
MODE_ANY;
}
CONTRACTL_END;
- // TODO: Implement filtering.
- return false;
+ return new EventPipeProvider(providerID, pCallbackFunction, pCallbackData);
}
-void EventPipe::WriteEvent(GUID& providerID, INT64 eventID, BYTE *pData, size_t length, bool sampleStack)
+void EventPipe::DeleteProvider(EventPipeProvider *pProvider)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Take the lock to make sure that we don't have a race
+ // between disabling tracing and deleting a provider
+ // where we hold a provider after tracing has been disabled.
+ CrstHolder _crst(GetLock());
+
+ if(pProvider != NULL)
+ {
+ if(Enabled())
+ {
+ // Save the provider until the end of the tracing session.
+ pProvider->SetDeleteDeferred();
+ }
+ else
+ {
+ // Delete the provider now.
+ // NOTE: This will remove it from all of the EventPipe data structures.
+ delete(pProvider);
+ }
+ }
+}
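The defer-or-delete decision under the configuration lock generalizes to any object a live session may still reference. A standalone sketch, with std::mutex standing in for the Crst and hypothetical names throughout:

#include <mutex>

struct Provider { bool deleteDeferred = false; };  // hypothetical stand-in

std::mutex g_configLock;        // plays the role of EventPipe::GetLock()
bool g_tracingEnabled = false;  // plays the role of Enabled()

void DeleteProviderSketch(Provider *pProvider)
{
    // Same race avoided as above: never free a provider a session still uses.
    std::lock_guard<std::mutex> lock(g_configLock);
    if (pProvider == nullptr)
        return;
    if (g_tracingEnabled)
        pProvider->deleteDeferred = true;  // keep alive until the session ends
    else
        delete pProvider;                  // no session: free immediately
}

int main()
{
    DeleteProviderSketch(new Provider());  // tracing off: deleted immediately
    return 0;
}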
+
+void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
+ PRECONDITION(s_pBufferManager != NULL);
}
CONTRACTL_END;
- StackContents stackContents;
- bool stackWalkSucceeded;
+ // Exit early if the event is not enabled.
+ if(!event.IsEnabled())
+ {
+ return;
+ }
- if(sampleStack)
+ // Get the current thread.
+ Thread *pThread = GetThread();
+ if(pThread == NULL)
{
- stackWalkSucceeded = WalkManagedStackForCurrentThread(stackContents);
+ // We can't write an event without the thread object.
+ return;
}
- // TODO: Write the event.
+ if(!s_pConfig->RundownEnabled() && s_pBufferManager != NULL)
+ {
+ if(!s_pBufferManager->WriteEvent(pThread, event, pData, length, pActivityId, pRelatedActivityId))
+ {
+ // This is used in DEBUG to make sure that we don't log an event synchronously that we didn't log to the buffer.
+ return;
+ }
+ }
+ else if(s_pConfig->RundownEnabled())
+ {
+ // Write synchronously to the file.
+ // We're under lock and blocking the disabling thread.
+ EventPipeEventInstance instance(
+ event,
+ pThread->GetOSThreadId(),
+ pData,
+ length,
+ pActivityId,
+ pRelatedActivityId);
+
+ if(s_pFile != NULL)
+ {
+ s_pFile->WriteEvent(instance);
+ }
+ }
+
+#ifdef _DEBUG
+ {
+ GCX_PREEMP();
+
+ // Create an instance of the event for the synchronous path.
+ EventPipeEventInstance instance(
+ event,
+ pThread->GetOSThreadId(),
+ pData,
+ length,
+ pActivityId,
+ pRelatedActivityId);
+
+ // Write to the EventPipeFile if it exists.
+ if(s_pSyncFile != NULL)
+ {
+ s_pSyncFile->WriteEvent(instance);
+ }
+
+ // Write to the EventPipeJsonFile if it exists.
+ if(s_pJsonFile != NULL)
+ {
+ s_pJsonFile->WriteEvent(instance);
+ }
+ }
+#endif // _DEBUG
}
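WriteEvent therefore has two mutually exclusive paths: buffered writes through the per-thread buffer manager while the session runs, and synchronous writes under the lock once rundown starts. A standalone sketch of that routing (the functions are illustrative stand-ins):

#include <cstdio>

bool g_rundownEnabled = false;  // stands in for s_pConfig->RundownEnabled()

void WriteToBuffer(int id)   { printf("buffered event %d\n", id); }
void WriteToFileSync(int id) { printf("synchronous event %d\n", id); }

void WriteEventSketch(int id, bool eventEnabled)
{
    if (!eventEnabled)
        return;              // exit early if the event is not enabled
    if (!g_rundownEnabled)
        WriteToBuffer(id);   // fast path: the per-thread buffer
    else
        WriteToFileSync(id); // rundown: blocking, under the config lock
}

int main()
{
    WriteEventSketch(1, true);   // buffered
    g_rundownEnabled = true;
    WriteEventSketch(2, true);   // synchronous
    return 0;
}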
-void EventPipe::WriteSampleProfileEvent(Thread *pThread, StackContents &stackContents)
+void EventPipe::WriteSampleProfileEvent(Thread *pSamplingThread, EventPipeEvent *pEvent, Thread *pTargetThread, StackContents &stackContents, BYTE *pData, unsigned int length)
{
CONTRACTL
{
NOTHROW;
GC_TRIGGERS;
MODE_PREEMPTIVE;
- PRECONDITION(pThread != NULL);
}
CONTRACTL_END;
- EX_TRY
+ // Write the event to the thread's buffer.
+ if(s_pBufferManager != NULL)
{
- if(s_pJsonFile != NULL)
+ // Specify the sampling thread as the "current thread", so that we select the right buffer.
+ // Specify the target thread so that the event gets properly attributed.
+ if(!s_pBufferManager->WriteEvent(pSamplingThread, *pEvent, pData, length, NULL /* pActivityId */, NULL /* pRelatedActivityId */, pTargetThread, &stackContents))
{
- CommonEventFields eventFields;
- QueryPerformanceCounter(&eventFields.TimeStamp);
- eventFields.ThreadID = pThread->GetOSThreadId();
+ // This is used in DEBUG to make sure that we don't log an event synchronously that we didn't log to the buffer.
+ return;
+ }
+ }
+
+#ifdef _DEBUG
+ {
+ GCX_PREEMP();
+
+ // Create an instance for the synchronous path.
+ SampleProfilerEventInstance instance(*pEvent, pTargetThread, pData, length);
+ stackContents.CopyTo(instance.GetStack());
- static SString message(W("THREAD_TIME"));
- s_pJsonFile->WriteEvent(eventFields, message, stackContents);
+ // Write to the EventPipeFile.
+ if(s_pSyncFile != NULL)
+ {
+ s_pSyncFile->WriteEvent(instance);
+ }
+
+ // Write to the EventPipeJsonFile if it exists.
+ if(s_pJsonFile != NULL)
+ {
+ s_pJsonFile->WriteEvent(instance);
}
}
- EX_CATCH{} EX_END_CATCH(SwallowAllExceptions);
+#endif // _DEBUG
}
bool EventPipe::WalkManagedStackForCurrentThread(StackContents &stackContents)
@@ -173,8 +417,12 @@ bool EventPipe::WalkManagedStackForCurrentThread(StackContents &stackContents)
CONTRACTL_END;
Thread *pThread = GetThread();
- _ASSERTE(pThread != NULL);
- return WalkManagedStackForThread(pThread, stackContents);
+ if(pThread != NULL)
+ {
+ return WalkManagedStackForThread(pThread, stackContents);
+ }
+
+ return false;
}
bool EventPipe::WalkManagedStackForThread(Thread *pThread, StackContents &stackContents)
@@ -232,3 +480,119 @@ StackWalkAction EventPipe::StackWalkCallback(CrawlFrame *pCf, StackContents *pDa
// Continue the stack walk.
return SWA_CONTINUE;
}
+
+EventPipeConfiguration* EventPipe::GetConfiguration()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return s_pConfig;
+}
+
+CrstStatic* EventPipe::GetLock()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return &s_configCrst;
+}
+
+void QCALLTYPE EventPipeInternal::Enable(
+ __in_z LPCWSTR outputFile,
+ unsigned int circularBufferSizeInMB,
+ long profilerSamplingRateInNanoseconds,
+ EventPipeProviderConfiguration *pProviders,
+ int numProviders)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ SampleProfiler::SetSamplingRate(profilerSamplingRateInNanoseconds);
+ EventPipe::Enable(outputFile, circularBufferSizeInMB, pProviders, numProviders);
+ END_QCALL;
+}
+
+void QCALLTYPE EventPipeInternal::Disable()
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+ EventPipe::Disable();
+ END_QCALL;
+}
+
+INT_PTR QCALLTYPE EventPipeInternal::CreateProvider(
+ GUID providerID,
+ EventPipeCallback pCallbackFunc)
+{
+ QCALL_CONTRACT;
+
+ EventPipeProvider *pProvider = NULL;
+
+ BEGIN_QCALL;
+
+ pProvider = EventPipe::CreateProvider(providerID, pCallbackFunc, NULL);
+
+ END_QCALL;
+
+ return reinterpret_cast<INT_PTR>(pProvider);
+}
+
+INT_PTR QCALLTYPE EventPipeInternal::DefineEvent(
+ INT_PTR provHandle,
+ unsigned int eventID,
+ __int64 keywords,
+ unsigned int eventVersion,
+ unsigned int level,
+ void *pMetadata,
+ unsigned int metadataLength)
+{
+ QCALL_CONTRACT;
+
+ EventPipeEvent *pEvent = NULL;
+
+ BEGIN_QCALL;
+
+ _ASSERTE(provHandle != NULL);
+ _ASSERTE(pMetadata != NULL);
+ EventPipeProvider *pProvider = reinterpret_cast<EventPipeProvider *>(provHandle);
+ pEvent = pProvider->AddEvent(eventID, keywords, eventVersion, (EventPipeEventLevel)level, (BYTE *)pMetadata, metadataLength);
+ _ASSERTE(pEvent != NULL);
+
+ END_QCALL;
+
+ return reinterpret_cast<INT_PTR>(pEvent);
+}
+
+void QCALLTYPE EventPipeInternal::DeleteProvider(
+ INT_PTR provHandle)
+{
+ QCALL_CONTRACT;
+ BEGIN_QCALL;
+
+ if(provHandle != NULL)
+ {
+ EventPipeProvider *pProvider = reinterpret_cast<EventPipeProvider*>(provHandle);
+ EventPipe::DeleteProvider(pProvider);
+ }
+
+ END_QCALL;
+}
+
+void QCALLTYPE EventPipeInternal::WriteEvent(
+ INT_PTR eventHandle,
+ unsigned int eventID,
+ void *pData,
+ unsigned int length,
+ LPCGUID pActivityId,
+ LPCGUID pRelatedActivityId)
+{
+ QCALL_CONTRACT;
+ BEGIN_QCALL;
+
+ _ASSERTE(eventHandle != NULL);
+ EventPipeEvent *pEvent = reinterpret_cast<EventPipeEvent *>(eventHandle);
+ EventPipe::WriteEvent(*pEvent, (BYTE *)pData, length, pActivityId, pRelatedActivityId);
+
+ END_QCALL;
+}
+
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/eventpipe.h b/src/vm/eventpipe.h
index 2978412325..a69e1ba840 100644
--- a/src/vm/eventpipe.h
+++ b/src/vm/eventpipe.h
@@ -5,19 +5,29 @@
#ifndef __EVENTPIPE_H__
#define __EVENTPIPE_H__
-#include "common.h"
+#ifdef FEATURE_PERFTRACING
+class CrstStatic;
+class EventPipeConfiguration;
+class EventPipeEvent;
+class EventPipeFile;
class EventPipeJsonFile;
-
-// The data fields common to every event.
-struct CommonEventFields
-{
- // Timestamp generated by QueryPerformanceCounter.
- LARGE_INTEGER TimeStamp;
-
- // Thread ID.
- DWORD ThreadID;
-};
+class EventPipeBuffer;
+class EventPipeBufferManager;
+class EventPipeProvider;
+class MethodDesc;
+class SampleProfilerEventInstance;
+struct EventPipeProviderConfiguration;
+
+// Define the event pipe callback to match the ETW callback signature.
+typedef void (*EventPipeCallback)(
+ LPCGUID SourceID,
+ ULONG IsEnabled,
+ UCHAR Level,
+ ULONGLONG MatchAnyKeywords,
+ ULONGLONG MatchAllKeywords,
+ void *FilterData,
+ void *CallbackContext);
class StackContents
{
@@ -29,9 +39,11 @@ private:
// Top of stack is at index 0.
UINT_PTR m_stackFrames[MAX_STACK_DEPTH];
+#ifdef _DEBUG
// Parallel array of MethodDesc pointers.
// Used for debug-only stack printing.
MethodDesc* m_methods[MAX_STACK_DEPTH];
+#endif // _DEBUG
// The next available slot in StackFrames.
unsigned int m_nextAvailableFrame;
@@ -45,6 +57,18 @@ public:
Reset();
}
+ void CopyTo(StackContents *pDest)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pDest != NULL);
+
+ memcpy_s(pDest->m_stackFrames, MAX_STACK_DEPTH * sizeof(UINT_PTR), m_stackFrames, sizeof(UINT_PTR) * m_nextAvailableFrame);
+#ifdef _DEBUG
+ memcpy_s(pDest->m_methods, MAX_STACK_DEPTH * sizeof(MethodDesc*), m_methods, sizeof(MethodDesc*) * m_nextAvailableFrame);
+#endif
+ pDest->m_nextAvailableFrame = m_nextAvailableFrame;
+ }
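CopyTo copies only the frames in use, but always passes the destination's full capacity so memcpy_s can reject an oversized source. A standalone sketch of the same bounded copy (kMaxDepth and the field names are illustrative stand-ins for MAX_STACK_DEPTH and the members above):

#include <cstdio>
#include <cstring>

const unsigned int kMaxDepth = 100;  // assumed value, mirroring MAX_STACK_DEPTH

struct Stack
{
    unsigned long frames[kMaxDepth];
    unsigned int used;
};

void CopyTo(const Stack &src, Stack &dst)
{
    // Copy only the used prefix, clamped to the destination capacity --
    // the same guarantee memcpy_s gives the code above.
    size_t bytes = (size_t)src.used * sizeof(src.frames[0]);
    if (bytes > sizeof(dst.frames))
        bytes = sizeof(dst.frames);
    memcpy(dst.frames, src.frames, bytes);
    dst.used = src.used;
}

int main()
{
    Stack a = {};
    a.frames[0] = 0x401000;
    a.used = 1;
    Stack b = {};
    CopyTo(a, b);
    printf("copied %u frame(s), top=0x%lx\n", b.used, b.frames[0]);
    return 0;
}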
+
void Reset()
{
LIMITED_METHOD_CONTRACT;
@@ -79,6 +103,7 @@ public:
return m_stackFrames[frameIndex];
}
+#ifdef _DEBUG
MethodDesc* GetMethod(unsigned int frameIndex)
{
LIMITED_METHOD_CONTRACT;
@@ -91,6 +116,7 @@ public:
return m_methods[frameIndex];
}
+#endif // _DEBUG
void Append(UINT_PTR controlPC, MethodDesc *pMethod)
{
@@ -99,14 +125,37 @@ public:
if(m_nextAvailableFrame < MAX_STACK_DEPTH)
{
m_stackFrames[m_nextAvailableFrame] = controlPC;
+#ifdef _DEBUG
m_methods[m_nextAvailableFrame] = pMethod;
+#endif
m_nextAvailableFrame++;
}
}
+
+ BYTE* GetPointer() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (BYTE*)m_stackFrames;
+ }
+
+ unsigned int GetSize() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_nextAvailableFrame * sizeof(UINT_PTR));
+ }
};
class EventPipe
{
+ // Declare friends.
+ friend class EventPipeConfiguration;
+ friend class EventPipeFile;
+ friend class EventPipeProvider;
+ friend class EventPipeBufferManager;
+ friend class SampleProfiler;
+
public:
// Initialize the event pipe.
@@ -119,20 +168,30 @@ class EventPipe
static void EnableOnStartup();
// Enable tracing via the event pipe.
- static void Enable();
+ static void Enable(
+ LPCWSTR strOutputPath,
+ uint circularBufferSizeInMB,
+ EventPipeProviderConfiguration *pProviders,
+ int numProviders);
// Disable tracing via the event pipe.
static void Disable();
- // Determine whether or not the specified provider/keyword combination is enabled.
- static bool EventEnabled(GUID& providerID, INT64 keyword);
+ // Specifies whether or not the event pipe is enabled.
+ static bool Enabled();
+
+ // Create a provider.
+ static EventPipeProvider* CreateProvider(const GUID &providerID, EventPipeCallback pCallbackFunction = NULL, void *pCallbackData = NULL);
- // Write out an event. The event is identified by the providerID/eventID pair.
+ // Delete a provider.
+ static void DeleteProvider(EventPipeProvider *pProvider);
+
+ // Write out an event.
// Data is written as a serialized blob matching the ETW serialization conventions.
- static void WriteEvent(GUID& providerID, INT64 eventID, BYTE *pData, size_t length, bool sampleStack);
+ static void WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId = NULL, LPCGUID pRelatedActivityId = NULL);
- // Write out a sample profile event with the specified stack.
- static void WriteSampleProfileEvent(Thread *pThread, StackContents &stackContents);
+ // Write out a sample profile event.
+ static void WriteSampleProfileEvent(Thread *pSamplingThread, EventPipeEvent *pEvent, Thread *pTargetThread, StackContents &stackContents, BYTE *pData = NULL, unsigned int length = 0);
// Get the managed call stack for the current thread.
static bool WalkManagedStackForCurrentThread(StackContents &stackContents);
@@ -145,10 +204,111 @@ class EventPipe
// Callback function for the stack walker. For each frame walked, this callback is invoked.
static StackWalkAction StackWalkCallback(CrawlFrame *pCf, StackContents *pData);
- static CrstStatic s_initCrst;
+ // Get the configuration object.
+ // This is called directly by the EventPipeProvider constructor to register the new provider.
+ static EventPipeConfiguration* GetConfiguration();
+
+ // Get the event pipe configuration lock.
+ static CrstStatic* GetLock();
+
+ static CrstStatic s_configCrst;
static bool s_tracingInitialized;
- static bool s_tracingEnabled;
+ static EventPipeConfiguration *s_pConfig;
+ static EventPipeBufferManager *s_pBufferManager;
+ static EventPipeFile *s_pFile;
+#ifdef _DEBUG
+ static EventPipeFile *s_pSyncFile;
static EventPipeJsonFile *s_pJsonFile;
+#endif // _DEBUG
};
+struct EventPipeProviderConfiguration
+{
+
+private:
+
+ LPCWSTR m_pProviderName;
+ UINT64 m_keywords;
+ unsigned int m_loggingLevel;
+
+public:
+
+ EventPipeProviderConfiguration()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pProviderName = NULL;
+ m_keywords = 0;
+ m_loggingLevel = 0;
+ }
+
+ EventPipeProviderConfiguration(
+ LPCWSTR pProviderName,
+ UINT64 keywords,
+ unsigned int loggingLevel)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pProviderName = pProviderName;
+ m_keywords = keywords;
+ m_loggingLevel = loggingLevel;
+ }
+
+ LPCWSTR GetProviderName() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pProviderName;
+ }
+
+ UINT64 GetKeywords() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_keywords;
+ }
+
+ unsigned int GetLevel() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_loggingLevel;
+ }
+};
+
+class EventPipeInternal
+{
+
+public:
+
+ static void QCALLTYPE Enable(
+ __in_z LPCWSTR outputFile,
+ unsigned int circularBufferSizeInMB,
+ long profilerSamplingRateInNanoseconds,
+ EventPipeProviderConfiguration *pProviders,
+ int numProviders);
+
+ static void QCALLTYPE Disable();
+
+ static INT_PTR QCALLTYPE CreateProvider(
+ GUID providerID,
+ EventPipeCallback pCallbackFunc);
+
+ static INT_PTR QCALLTYPE DefineEvent(
+ INT_PTR provHandle,
+ unsigned int eventID,
+ __int64 keywords,
+ unsigned int eventVersion,
+ unsigned int level,
+ void *pMetadata,
+ unsigned int metadataLength);
+
+ static void QCALLTYPE DeleteProvider(
+ INT_PTR provHandle);
+
+ static void QCALLTYPE WriteEvent(
+ INT_PTR eventHandle,
+ unsigned int eventID,
+ void *pData,
+ unsigned int length,
+ LPCGUID pActivityId, LPCGUID pRelatedActivityId);
+};
+
+#endif // FEATURE_PERFTRACING
+
#endif // __EVENTPIPE_H__
diff --git a/src/vm/eventpipebuffer.cpp b/src/vm/eventpipebuffer.cpp
new file mode 100644
index 0000000000..00652c9fac
--- /dev/null
+++ b/src/vm/eventpipebuffer.cpp
@@ -0,0 +1,281 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+
+#include "common.h"
+#include "eventpipeeventinstance.h"
+#include "eventpipebuffer.h"
+
+#ifdef FEATURE_PERFTRACING
+
+EventPipeBuffer::EventPipeBuffer(unsigned int bufferSize)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pBuffer = new BYTE[bufferSize];
+ memset(m_pBuffer, 0, bufferSize);
+ m_pCurrent = m_pBuffer;
+ m_pLimit = m_pBuffer + bufferSize;
+
+ m_mostRecentTimeStamp.QuadPart = 0;
+ m_pLastPoppedEvent = NULL;
+ m_pPrevBuffer = NULL;
+ m_pNextBuffer = NULL;
+}
+
+EventPipeBuffer::~EventPipeBuffer()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_pBuffer != NULL)
+ {
+ delete[] m_pBuffer;
+ }
+}
+
+bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pThread != NULL);
+ }
+ CONTRACTL_END;
+
+ // Calculate the size of the event.
+ unsigned int eventSize = sizeof(EventPipeEventInstance) + dataLength;
+
+ // Make sure we have enough space to write the event.
+ if(m_pCurrent + eventSize >= m_pLimit)
+ {
+ return false;
+ }
+
+ // Calculate the location of the data payload.
+ BYTE *pDataDest = m_pCurrent + sizeof(EventPipeEventInstance);
+
+ bool success = true;
+ EX_TRY
+ {
+ // Placement-new the EventPipeEventInstance.
+ EventPipeEventInstance *pInstance = new (m_pCurrent) EventPipeEventInstance(
+ event,
+ pThread->GetOSThreadId(),
+ pDataDest,
+ dataLength,
+ pActivityId,
+ pRelatedActivityId);
+
+ // Copy the stack if a separate stack trace was provided.
+ if(pStack != NULL)
+ {
+ StackContents *pInstanceStack = pInstance->GetStack();
+ pStack->CopyTo(pInstanceStack);
+ }
+
+ // Write the event payload data to the buffer.
+ if(dataLength > 0)
+ {
+ memcpy(pDataDest, pData, dataLength);
+ }
+
+ // Save the most recent event timestamp.
+ m_mostRecentTimeStamp = pInstance->GetTimeStamp();
+
+ }
+ EX_CATCH
+ {
+ // If a failure occurs, bail out and don't advance the pointer.
+ success = false;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+
+ if(success)
+ {
+ // Advance the current pointer past the event.
+ m_pCurrent += eventSize;
+ }
+
+ return success;
+}
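The buffer layout above is a fixed-size instance header placement-new'd at the write cursor with the variable-length payload immediately behind it; the cursor advances only when the whole record fits. A standalone sketch (Header is a hypothetical stand-in for EventPipeEventInstance):

#include <cstdio>
#include <cstring>
#include <new>

struct Header          // hypothetical stand-in for EventPipeEventInstance
{
    unsigned int id;
    unsigned int dataLength;
};

bool Write(unsigned char *&pCurrent, unsigned char *pLimit,
           unsigned int id, const void *pData, unsigned int length)
{
    unsigned int total = sizeof(Header) + length;
    if (pCurrent + total >= pLimit)        // same full-buffer check as above
        return false;

    new (pCurrent) Header{ id, length };              // placement-new the header
    memcpy(pCurrent + sizeof(Header), pData, length); // payload right behind it
    pCurrent += total;                                // advance only on success
    return true;
}

int main()
{
    unsigned char buffer[64] = {};
    unsigned char *pCurrent = buffer;
    const char payload[] = "hi";
    printf("wrote: %d\n",
           Write(pCurrent, buffer + sizeof(buffer), 1, payload, sizeof(payload)));
    return 0;
}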
+
+LARGE_INTEGER EventPipeBuffer::GetMostRecentTimeStamp() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_mostRecentTimeStamp;
+}
+
+void EventPipeBuffer::Clear()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ memset(m_pBuffer, 0, (size_t)(m_pLimit - m_pBuffer));
+ m_pCurrent = m_pBuffer;
+ m_mostRecentTimeStamp.QuadPart = 0;
+ m_pLastPoppedEvent = NULL;
+}
+
+EventPipeEventInstance* EventPipeBuffer::GetNext(EventPipeEventInstance *pEvent, LARGE_INTEGER beforeTimeStamp)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EventPipeEventInstance *pNextInstance = NULL;
+ // If input is NULL, return the first event if there is one.
+ if(pEvent == NULL)
+ {
+ // If this buffer contains an event, select it.
+ if(m_pCurrent > m_pBuffer)
+ {
+ pNextInstance = (EventPipeEventInstance*)m_pBuffer;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+ else
+ {
+ // Confirm that pEvent is within the used range of the buffer.
+ if(((BYTE*)pEvent < m_pBuffer) || ((BYTE*)pEvent >= m_pCurrent))
+ {
+ _ASSERT(!"Input pointer is out of range.");
+ return NULL;
+ }
+
+ // We have a pointer within the bounds of the buffer.
+ // Find the next event by skipping past the current event and its data payload, which immediately follows the instance.
+ pNextInstance = (EventPipeEventInstance *)(pEvent->GetData() + pEvent->GetLength());
+
+ // Check to see if we've reached the end of the written portion of the buffer.
+ if((BYTE*)pNextInstance >= m_pCurrent)
+ {
+ return NULL;
+ }
+ }
+
+ // Ensure that the timestamp is valid. The buffer is zeroed before use, so a zero timestamp is invalid.
+ LARGE_INTEGER nextTimeStamp = pNextInstance->GetTimeStamp();
+ if(nextTimeStamp.QuadPart == 0)
+ {
+ return NULL;
+ }
+
+ // Ensure that the timestamp is earlier than the beforeTimeStamp.
+ if(nextTimeStamp.QuadPart >= beforeTimeStamp.QuadPart)
+ {
+ return NULL;
+ }
+
+ return pNextInstance;
+}
+
+EventPipeEventInstance* EventPipeBuffer::PeekNext(LARGE_INTEGER beforeTimeStamp)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Get the next event using the last popped event as a marker.
+ return GetNext(m_pLastPoppedEvent, beforeTimeStamp);
+}
+
+EventPipeEventInstance* EventPipeBuffer::PopNext(LARGE_INTEGER beforeTimeStamp)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Get the next event using the last popped event as a marker.
+ EventPipeEventInstance *pNext = PeekNext(beforeTimeStamp);
+ if(pNext != NULL)
+ {
+ m_pLastPoppedEvent = pNext;
+ }
+
+ return pNext;
+}
+
+#ifdef _DEBUG
+bool EventPipeBuffer::EnsureConsistency()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Check to see if the buffer is empty.
+ if(m_pBuffer == m_pCurrent)
+ {
+ // Make sure that the buffer size is greater than zero.
+ _ASSERTE(m_pBuffer != m_pLimit);
+ }
+
+ // Validate the contents of the filled portion of the buffer.
+ BYTE *ptr = m_pBuffer;
+ while(ptr < m_pCurrent)
+ {
+ // Validate the event.
+ EventPipeEventInstance *pInstance = (EventPipeEventInstance*)ptr;
+ _ASSERTE(pInstance->EnsureConsistency());
+
+ // Validate that payload and length match.
+ _ASSERTE((pInstance->GetData() != NULL && pInstance->GetLength() > 0) || (pInstance->GetData() == NULL && pInstance->GetLength() == 0));
+
+ // Skip the event.
+ ptr += sizeof(*pInstance) + pInstance->GetLength();
+ }
+
+ // When we're done walking the filled portion of the buffer,
+ // ptr should be the same as m_pCurrent.
+ _ASSERTE(ptr == m_pCurrent);
+
+ // Walk the rest of the buffer, making sure it is properly zeroed.
+ while(ptr < m_pLimit)
+ {
+ _ASSERTE(*ptr++ == 0);
+ }
+
+ return true;
+}
+#endif // _DEBUG
+
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/eventpipebuffer.h b/src/vm/eventpipebuffer.h
new file mode 100644
index 0000000000..f279a2865c
--- /dev/null
+++ b/src/vm/eventpipebuffer.h
@@ -0,0 +1,109 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __EVENTPIPE_BUFFER_H__
+#define __EVENTPIPE_BUFFER_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "eventpipeevent.h"
+#include "eventpipeeventinstance.h"
+
+class EventPipeBuffer
+{
+
+ friend class EventPipeBufferList;
+ friend class EventPipeBufferManager;
+
+private:
+
+ // A pointer to the actual buffer.
+ BYTE *m_pBuffer;
+
+ // The current write pointer.
+ BYTE *m_pCurrent;
+
+ // The max write pointer (end of the buffer).
+ BYTE *m_pLimit;
+
+ // The timestamp of the most recent event in the buffer.
+ LARGE_INTEGER m_mostRecentTimeStamp;
+
+ // Used by PopNext as input to GetNext.
+ // If NULL, no events have been popped.
+ // The event will still remain in the buffer after it is popped, but PopNext will not return it again.
+ EventPipeEventInstance *m_pLastPoppedEvent;
+
+ // Each buffer will become part of a per-thread linked list of buffers.
+ // The linked list is invasive, thus we declare the pointers here.
+ EventPipeBuffer *m_pPrevBuffer;
+ EventPipeBuffer *m_pNextBuffer;
+
+ unsigned int GetSize() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (unsigned int)(m_pLimit - m_pBuffer);
+ }
+
+ EventPipeBuffer* GetPrevious() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pPrevBuffer;
+ }
+
+ EventPipeBuffer* GetNext() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pNextBuffer;
+ }
+
+ void SetPrevious(EventPipeBuffer *pBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pPrevBuffer = pBuffer;
+ }
+
+ void SetNext(EventPipeBuffer *pBuffer)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pNextBuffer = pBuffer;
+ }
+
+public:
+
+ EventPipeBuffer(unsigned int bufferSize);
+ ~EventPipeBuffer();
+
+ // Write an event to the buffer.
+ // An optional stack trace can be provided for sample profiler events.
+ // Otherwise, if a stack trace is needed, one will be automatically collected.
+ // Returns:
+ // - true: The write succeeded.
+ // - false: The write failed. In this case, the buffer should be considered full.
+ bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack = NULL);
+
+ // Get the timestamp of the most recent event in the buffer.
+ LARGE_INTEGER GetMostRecentTimeStamp() const;
+
+ // Clear the buffer.
+ void Clear();
+
+ // Get the next event from the buffer as long as it is before the specified timestamp.
+ // Input of NULL gets the first event.
+ EventPipeEventInstance* GetNext(EventPipeEventInstance *pEvent, LARGE_INTEGER beforeTimeStamp);
+
+ // Get the next event from the buffer, but don't mark it read.
+ EventPipeEventInstance* PeekNext(LARGE_INTEGER beforeTimeStamp);
+
+ // Get the next event from the buffer and mark it as read.
+ EventPipeEventInstance* PopNext(LARGE_INTEGER beforeTimeStamp);
+
+#ifdef _DEBUG
+ bool EnsureConsistency();
+#endif // _DEBUG
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __EVENTPIPE_BUFFER_H__
diff --git a/src/vm/eventpipebuffermanager.cpp b/src/vm/eventpipebuffermanager.cpp
new file mode 100644
index 0000000000..86a3e03c59
--- /dev/null
+++ b/src/vm/eventpipebuffermanager.cpp
@@ -0,0 +1,808 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "eventpipeconfiguration.h"
+#include "eventpipebuffer.h"
+#include "eventpipebuffermanager.h"
+
+#ifdef FEATURE_PERFTRACING
+
+EventPipeBufferManager::EventPipeBufferManager()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pPerThreadBufferList = new SList<SListElem<EventPipeBufferList*>>();
+ m_sizeOfAllBuffers = 0;
+ m_lock.Init(LOCK_TYPE_DEFAULT);
+
+#ifdef _DEBUG
+ m_numBuffersAllocated = 0;
+ m_numBuffersStolen = 0;
+ m_numBuffersLeaked = 0;
+ m_numEventsStored = 0;
+ m_numEventsWritten = 0;
+#endif // _DEBUG
+}
+
+EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(Thread *pThread, unsigned int requestSize)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pThread != NULL);
+ PRECONDITION(requestSize > 0);
+ }
+ CONTRACTL_END;
+
+ // Allocating a buffer requires us to take the lock.
+ SpinLockHolder _slh(&m_lock);
+
+ // Determine if the requesting thread has at least one buffer.
+ // If not, we guarantee that each thread gets at least one (to prevent thrashing when the circular buffer size is too small).
+ bool allocateNewBuffer = false;
+ EventPipeBufferList *pThreadBufferList = pThread->GetEventPipeBufferList();
+ if(pThreadBufferList == NULL)
+ {
+ pThreadBufferList = new EventPipeBufferList(this);
+ m_pPerThreadBufferList->InsertTail(new SListElem<EventPipeBufferList*>(pThreadBufferList));
+ pThread->SetEventPipeBufferList(pThreadBufferList);
+ allocateNewBuffer = true;
+ }
+
+ // Determine if policy allows us to allocate another buffer, or if we need to steal one
+ // from another thread.
+ if(!allocateNewBuffer)
+ {
+ EventPipeConfiguration *pConfig = EventPipe::GetConfiguration();
+ if(pConfig == NULL)
+ {
+ return NULL;
+ }
+
+ size_t circularBufferSizeInBytes = pConfig->GetCircularBufferSize();
+ if(m_sizeOfAllBuffers < circularBufferSizeInBytes)
+ {
+ // We don't worry about the fact that a new buffer could put us over the circular buffer size.
+ // This is OK, and we won't do it again if we actually go over.
+ allocateNewBuffer = true;
+ }
+ }
+
+ EventPipeBuffer *pNewBuffer = NULL;
+ if(!allocateNewBuffer)
+ {
+ // We can't allocate a new buffer.
+ // Find the oldest buffer, zero it, and re-purpose it for this thread.
+
+ // Find the thread that contains the oldest stealable buffer, and get its list of buffers.
+ EventPipeBufferList *pListToStealFrom = FindThreadToStealFrom();
+ if(pListToStealFrom != NULL)
+ {
+ // Assert that the buffer we're stealing is not the only buffer in the list.
+ // This invariant is enforced by FindThreadToStealFrom.
+ _ASSERTE((pListToStealFrom->GetHead() != NULL) && (pListToStealFrom->GetHead()->GetNext() != NULL));
+
+ // Remove the oldest buffer from the list.
+ pNewBuffer = pListToStealFrom->GetAndRemoveHead();
+
+ // De-allocate the buffer. We do this because buffers are variable sized
+ // based on how much volume is coming from the thread.
+ DeAllocateBuffer(pNewBuffer);
+ pNewBuffer = NULL;
+
+ // Set that we want to allocate a new buffer.
+ allocateNewBuffer = true;
+
+#ifdef _DEBUG
+ m_numBuffersStolen++;
+#endif // _DEBUG
+
+ }
+ else
+ {
+ // This only happens when # of threads == # of buffers.
+ // We'll allocate one more buffer, and then this won't happen again.
+ allocateNewBuffer = true;
+ }
+ }
+
+ if(allocateNewBuffer)
+ {
+ // Pick a buffer size by multiplying the base buffer size by the number of buffers already allocated for this thread.
+ unsigned int sizeMultiplier = pThreadBufferList->GetCount() + 1;
+
+ // Pick the base buffer size. Debug builds have a smaller size to stress the allocate/steal path more.
+ unsigned int baseBufferSize =
+#ifdef _DEBUG
+ 5 * 1024; // 5K
+#else
+ 100 * 1024; // 100K
+#endif
+ unsigned int bufferSize = baseBufferSize * sizeMultiplier;
+
+ // Make sure that buffer size >= request size so that the buffer size does not
+ // determine the max event size.
+ if(bufferSize < requestSize)
+ {
+ bufferSize = requestSize;
+ }
+
+ pNewBuffer = new EventPipeBuffer(bufferSize);
+ m_sizeOfAllBuffers += bufferSize;
+#ifdef _DEBUG
+ m_numBuffersAllocated++;
+#endif // _DEBUG
+ }
+
+ // Set the buffer on the thread.
+ if(pNewBuffer != NULL)
+ {
+ pThreadBufferList->InsertTail(pNewBuffer);
+ return pNewBuffer;
+ }
+
+ return NULL;
+}
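A quick worked example of the sizing policy above: the Nth buffer a thread receives is N times the base size (100K in release, 5K in debug), and an oversized event raises the request past the policy. A standalone sketch:

#include <cstdio>

unsigned int NextBufferSize(unsigned int buffersOwned, unsigned int requestSize)
{
    const unsigned int baseBufferSize = 100 * 1024;  // release base size (100K)
    unsigned int bufferSize = baseBufferSize * (buffersOwned + 1);
    // Keep buffer size >= request size so it never caps the max event size.
    return (bufferSize < requestSize) ? requestSize : bufferSize;
}

int main()
{
    for (unsigned int owned = 0; owned < 4; owned++)
        printf("buffer #%u -> %u bytes\n", owned + 1, NextBufferSize(owned, 0));
    // A huge event still fits: the request overrides the multiplier.
    printf("oversized request -> %u bytes\n", NextBufferSize(0, 1024 * 1024));
    return 0;
}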
+
+EventPipeBufferList* EventPipeBufferManager::FindThreadToStealFrom()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(m_lock.OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ // Find the thread buffer list containing the buffer whose most recent event is the oldest, as long as the buffer is not
+ // the current buffer for the thread (i.e. its next pointer is non-NULL).
+ // This means that the thread must also have multiple buffers, so that we don't steal its only buffer.
+ EventPipeBufferList *pOldestContainingList = NULL;
+
+ SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeBufferList *pCandidate = pElem->GetValue();
+
+ // The current candidate has more than one buffer (otherwise it is disqualified).
+ if(pCandidate->GetHead()->GetNext() != NULL)
+ {
+ // If we haven't seen any candidates, this one automatically becomes the oldest candidate.
+ if(pOldestContainingList == NULL)
+ {
+ pOldestContainingList = pCandidate;
+ }
+ // Otherwise, to replace the existing candidate, this candidate must have an older timestamp in its oldest buffer.
+ else if((pOldestContainingList->GetHead()->GetMostRecentTimeStamp().QuadPart) >
+ (pCandidate->GetHead()->GetMostRecentTimeStamp().QuadPart))
+ {
+ pOldestContainingList = pCandidate;
+ }
+ }
+
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ }
+
+ return pOldestContainingList;
+}
+
+void EventPipeBufferManager::DeAllocateBuffer(EventPipeBuffer *pBuffer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(pBuffer != NULL)
+ {
+ m_sizeOfAllBuffers -= pBuffer->GetSize();
+ delete(pBuffer);
+#ifdef _DEBUG
+ m_numBuffersAllocated--;
+#endif // _DEBUG
+ }
+}
+
+bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread, StackContents *pStack)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ // The input thread must match the current thread because no lock is taken on the buffer.
+ PRECONDITION(pThread == GetThread());
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pThread == GetThread());
+
+ // Check to see if an event thread was specified. If not, then use the current thread.
+ if(pEventThread == NULL)
+ {
+ pEventThread = pThread;
+ }
+
+ // Before we pick a buffer, make sure the event is enabled.
+ if(!event.IsEnabled())
+ {
+ return false;
+ }
+
+ // The event is still enabled. Mark that the thread is now writing an event.
+ pThread->SetEventWriteInProgress(true);
+
+ // Check one more time to make sure that the event is still enabled.
+ // We do this because we might be trying to disable tracing and free buffers, so we
+ // must make sure that the event is enabled after we mark that we're writing to avoid
+ // races with the destructing thread.
+ if(!event.IsEnabled())
+ {
+ return false;
+ }
+
+ // See if the thread already has a buffer to try.
+ bool allocNewBuffer = false;
+ EventPipeBuffer *pBuffer = NULL;
+ EventPipeBufferList *pThreadBufferList = pThread->GetEventPipeBufferList();
+ if(pThreadBufferList == NULL)
+ {
+ allocNewBuffer = true;
+ }
+ else
+ {
+ // The thread already has a buffer list. Select the newest buffer and attempt to write into it.
+ pBuffer = pThreadBufferList->GetTail();
+ if(pBuffer == NULL)
+ {
+ // This should never happen. If the buffer list exists, it must contain at least one entry.
+ _ASSERT(!"Thread buffer list with zero entries encountered.");
+ return false;
+ }
+ else
+ {
+ // Attempt to write the event to the buffer. If this fails, we should allocate a new buffer.
+ allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pActivityId, pRelatedActivityId, pStack);
+ }
+ }
+
+ // Check to see if we need to allocate a new buffer, and if so, do it here.
+ if(allocNewBuffer)
+ {
+ // We previously switched to preemptive mode here, however, this is not safe and can cause deadlocks.
+ // When a GC is started, and background threads are created (for the first BGC), a thread creation event is fired.
+ // When control gets here the buffer is allocated, but then the thread hangs waiting for the GC to complete
+ // (it was marked as started before creating threads) so that it can switch back to cooperative mode.
+ // However, the GC is waiting on this call to return so that it can make forward progress. Thus it is not safe
+ // to switch to preemptive mode here.
+
+ unsigned int requestSize = sizeof(EventPipeEventInstance) + length;
+ pBuffer = AllocateBufferForThread(pThread, requestSize);
+ }
+
+ // Try to write the event after we allocated (or stole) a buffer.
+ // This is the first time if the thread had no buffers before the call to this function.
+ // This is the second time if this thread did have one or more buffers, but they were full.
+ if(allocNewBuffer && pBuffer != NULL)
+ {
+ allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pActivityId, pRelatedActivityId, pStack);
+ }
+
+ // Mark that the thread is no longer writing an event.
+ pThread->SetEventWriteInProgress(false);
+
+#ifdef _DEBUG
+ if(!allocNewBuffer)
+ {
+ InterlockedIncrement(&m_numEventsStored);
+ }
+#endif // _DEBUG
+ return !allocNewBuffer;
+}
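The protocol above is: try the thread's newest buffer, allocate (or steal) on failure, then retry exactly once; a second failure means the event is dropped. A standalone sketch with a hypothetical fixed-capacity buffer:

#include <cstdio>

struct Buf                        // hypothetical fixed-capacity buffer
{
    unsigned int used;
    unsigned int cap;
};

bool TryWrite(Buf *pBuf, unsigned int size)
{
    if (pBuf == nullptr || pBuf->used + size > pBuf->cap)
        return false;             // buffer full: caller must allocate
    pBuf->used += size;
    return true;
}

bool WriteEventSketch(Buf *&pCurrent, unsigned int size)
{
    bool ok = TryWrite(pCurrent, size);   // first attempt: newest buffer
    if (!ok)
    {
        static Buf fresh = { 0, 1024 };   // stands in for AllocateBufferForThread
        pCurrent = &fresh;
        ok = TryWrite(pCurrent, size);    // second (and last) attempt
    }
    return ok;                            // false means the event is dropped
}

int main()
{
    Buf small = { 0, 8 };
    Buf *pCurrent = &small;
    printf("write 16 bytes: %d\n", WriteEventSketch(pCurrent, 16));
    return 0;
}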
+
+void EventPipeBufferManager::WriteAllBuffersToFile(EventPipeFile *pFile, LARGE_INTEGER stopTimeStamp)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pFile != NULL);
+ }
+ CONTRACTL_END;
+
+ // TODO: Better version of merge sort.
+ // 1. Iterate through all of the threads, adding each buffer to a temporary list.
+ // 2. While iterating, get the lowest most recent timestamp. This is the timestamp that we want to process up to.
+ // 3. Process up to the lowest most recent timestamp for the set of buffers.
+ // 4. When we get NULLs from each of the buffers on PopNext(), we're done.
+ // 5. While iterating, if PopNext() == NULL && Empty() == TRUE, remove the buffer from the list. It's empty.
+ // 6. While iterating, grab the next lowest most recent timestamp.
+ // 7. Walk through the list again and look for any buffers that have a lower most recent timestamp than the next most recent timestamp.
+ // 8. If we find one, add it to the list and select its most recent timestamp as the lowest.
+ // 9. Process again (go to 3).
+ // 10. Continue until there are no more buffers to process.
+
+ // Take the lock before walking the buffer list.
+ SpinLockHolder _slh(&m_lock);
+
+ // Naively walk the circular buffer, writing the event stream in timestamp order.
+ while(true)
+ {
+ EventPipeEventInstance *pOldestInstance = NULL;
+ EventPipeBuffer *pOldestContainingBuffer = NULL;
+ EventPipeBufferList *pOldestContainingList = NULL;
+ SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeBufferList *pBufferList = pElem->GetValue();
+
+ // Peek the next event out of the list.
+ EventPipeBuffer *pContainingBuffer = NULL;
+ EventPipeEventInstance *pNext = pBufferList->PeekNextEvent(stopTimeStamp, &pContainingBuffer);
+ if(pNext != NULL)
+ {
+ // If it's the oldest event we've seen, then save it.
+ if((pOldestInstance == NULL) ||
+ (pOldestInstance->GetTimeStamp().QuadPart > pNext->GetTimeStamp().QuadPart))
+ {
+ pOldestInstance = pNext;
+ pOldestContainingBuffer = pContainingBuffer;
+ pOldestContainingList = pBufferList;
+ }
+ }
+
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ }
+
+ if(pOldestInstance == NULL)
+ {
+ // We're done. There are no more events.
+ break;
+ }
+
+ // Write the oldest event.
+ pFile->WriteEvent(*pOldestInstance);
+#ifdef _DEBUG
+ m_numEventsWritten++;
+#endif // _DEBUG
+
+ // Pop the event from the buffer.
+ pOldestContainingList->PopNextEvent(stopTimeStamp);
+ }
+}
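The loop above is effectively a k-way merge: repeatedly peek the head of every per-thread list, emit the globally oldest event, and stop once nothing remains before the stop timestamp. A standalone sketch with plain integer timestamps in place of QueryPerformanceCounter values:

#include <cstdio>
#include <queue>
#include <vector>

int main()
{
    // Two per-thread event queues, timestamps in arrival order.
    std::vector<std::queue<long long>> threads(2);
    threads[0].push(10); threads[0].push(30);
    threads[1].push(20); threads[1].push(40);
    const long long stopTimeStamp = 35;   // plays the role of disableTimeStamp

    while (true)
    {
        int oldest = -1;
        for (int i = 0; i < (int)threads.size(); i++)
        {
            if (threads[i].empty() || threads[i].front() >= stopTimeStamp)
                continue;                 // nothing eligible: PeekNextEvent == NULL
            if (oldest < 0 || threads[i].front() < threads[oldest].front())
                oldest = i;               // track the globally oldest head
        }
        if (oldest < 0)
            break;                        // done: no events before the stop timestamp
        printf("write event ts=%lld from thread %d\n",
               threads[oldest].front(), oldest);
        threads[oldest].pop();            // like PopNextEvent
    }
    return 0;
}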
+
+void EventPipeBufferManager::DeAllocateBuffers()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(EnsureConsistency());
+
+ // Take the thread store lock because we're going to iterate through the thread list.
+ {
+ ThreadStoreLockHolder tsl;
+
+ // Take the buffer manager manipulation lock.
+ SpinLockHolder _slh(&m_lock);
+
+ Thread *pThread = NULL;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ // Get the thread's buffer list.
+ EventPipeBufferList *pBufferList = pThread->GetEventPipeBufferList();
+ if(pBufferList != NULL)
+ {
+ // Attempt to free the buffer list.
+ // If the thread is using its buffer list, skip it.
+ // This means we will leak a single buffer, but if tracing is re-enabled, that buffer can be used again.
+ if(!pThread->GetEventWriteInProgress())
+ {
+ EventPipeBuffer *pBuffer = pBufferList->GetAndRemoveHead();
+ while(pBuffer != NULL)
+ {
+ DeAllocateBuffer(pBuffer);
+ pBuffer = pBufferList->GetAndRemoveHead();
+ }
+
+ // Remove the list entry from the per thread buffer list.
+ SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeBufferList* pEntry = pElem->GetValue();
+ if(pEntry == pBufferList)
+ {
+ pElem = m_pPerThreadBufferList->FindAndRemove(pElem);
+
+ // In DEBUG, make sure that the element was found and removed.
+ _ASSERTE(pElem != NULL);
+ }
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ }
+
+ // Remove the list reference from the thread.
+ pThread->SetEventPipeBufferList(NULL);
+
+ // Now that all of the list elements have been freed, free the list itself.
+ delete(pBufferList);
+ pBufferList = NULL;
+ }
+#ifdef _DEBUG
+ else
+ {
+ // We can't deallocate the buffers.
+ m_numBuffersLeaked += pBufferList->GetCount();
+ }
+#endif // _DEBUG
+ }
+ }
+ }
+
+ // Now that we've walked through all of the threads, let's see if there are any other buffers
+ // that belonged to threads that died during tracing. We can free these now.
+
+ // Take the buffer manager manipulation lock
+ SpinLockHolder _slh(&m_lock);
+
+ SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
+ while(pElem != NULL)
+ {
+ // Get the list and determine if we can free it.
+ EventPipeBufferList *pBufferList = pElem->GetValue();
+ if(!pBufferList->OwnedByThread())
+ {
+ // Iterate over all nodes in the list and de-allocate them.
+ EventPipeBuffer *pBuffer = pBufferList->GetAndRemoveHead();
+ while(pBuffer != NULL)
+ {
+ DeAllocateBuffer(pBuffer);
+ pBuffer = pBufferList->GetAndRemoveHead();
+ }
+
+ // Remove the buffer list from the per-thread buffer list.
+ pElem = m_pPerThreadBufferList->FindAndRemove(pElem);
+ _ASSERTE(pElem != NULL);
+
+ // Now that all of the list elements have been freed, free the list itself.
+ delete(pBufferList);
+ pBufferList = NULL;
+ }
+
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ }
+}
+
+#ifdef _DEBUG
+bool EventPipeBufferManager::EnsureConsistency()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeBufferList *pBufferList = pElem->GetValue();
+
+ _ASSERTE(pBufferList->EnsureConsistency());
+
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ }
+
+ return true;
+}
+#endif // _DEBUG
+
+EventPipeBufferList::EventPipeBufferList(EventPipeBufferManager *pManager)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_pManager = pManager;
+ m_pHeadBuffer = NULL;
+ m_pTailBuffer = NULL;
+ m_bufferCount = 0;
+ m_pReadBuffer = NULL;
+ m_ownedByThread = true;
+
+#ifdef _DEBUG
+ m_pCreatingThread = GetThread();
+#endif // _DEBUG
+}
+
+EventPipeBuffer* EventPipeBufferList::GetHead()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pHeadBuffer;
+}
+
+EventPipeBuffer* EventPipeBufferList::GetTail()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pTailBuffer;
+}
+
+void EventPipeBufferList::InsertTail(EventPipeBuffer *pBuffer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(pBuffer != NULL);
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(EnsureConsistency());
+
+ // Ensure that the input buffer didn't come from another list that was improperly cleaned up.
+ _ASSERTE((pBuffer->GetNext() == NULL) && (pBuffer->GetPrevious() == NULL));
+
+ // First node in the list.
+ if(m_pTailBuffer == NULL)
+ {
+ m_pHeadBuffer = m_pTailBuffer = pBuffer;
+ }
+ else
+ {
+ // Set links between the old and new tail nodes.
+ m_pTailBuffer->SetNext(pBuffer);
+ pBuffer->SetPrevious(m_pTailBuffer);
+
+ // Set the new tail node.
+ m_pTailBuffer = pBuffer;
+ }
+
+ m_bufferCount++;
+
+ _ASSERTE(EnsureConsistency());
+}
+
+EventPipeBuffer* EventPipeBufferList::GetAndRemoveHead()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(EnsureConsistency());
+
+ EventPipeBuffer *pRetBuffer = NULL;
+ if(m_pHeadBuffer != NULL)
+ {
+ // Save the head node.
+ pRetBuffer = m_pHeadBuffer;
+
+ // Set the new head node.
+ m_pHeadBuffer = m_pHeadBuffer->GetNext();
+
+ // Update the head node's previous pointer.
+ if(m_pHeadBuffer != NULL)
+ {
+ m_pHeadBuffer->SetPrevious(NULL);
+ }
+ else
+ {
+ // We just removed the last buffer from the list.
+ // Make sure both head and tail pointers are NULL.
+ m_pTailBuffer = NULL;
+ }
+
+ // Clear the next pointer of the old head node.
+ pRetBuffer->SetNext(NULL);
+
+ // Ensure that the old head node has no dangling references.
+ _ASSERTE((pRetBuffer->GetNext() == NULL) && (pRetBuffer->GetPrevious() == NULL));
+
+ // Decrement the count of buffers in the list.
+ m_bufferCount--;
+ }
+
+ _ASSERTE(EnsureConsistency());
+
+ return pRetBuffer;
+}
+
+unsigned int EventPipeBufferList::GetCount() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_bufferCount;
+}
+
+EventPipeEventInstance* EventPipeBufferList::PeekNextEvent(LARGE_INTEGER beforeTimeStamp, EventPipeBuffer **pContainingBuffer)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Get the current read buffer.
+ // If it's not set, start with the head buffer.
+ if(m_pReadBuffer == NULL)
+ {
+ m_pReadBuffer = m_pHeadBuffer;
+ }
+
+ // If the read buffer is still NULL, then this list contains no buffers.
+ if(m_pReadBuffer == NULL)
+ {
+ return NULL;
+ }
+
+ // Get the next event in the buffer.
+ EventPipeEventInstance *pNext = m_pReadBuffer->PeekNext(beforeTimeStamp);
+
+ // If the next event is NULL, then go to the next buffer.
+ if(pNext == NULL)
+ {
+ m_pReadBuffer = m_pReadBuffer->GetNext();
+ if(m_pReadBuffer != NULL)
+ {
+ pNext = m_pReadBuffer->PeekNext(beforeTimeStamp);
+ }
+ }
+
+ // Set the containing buffer.
+ if(pNext != NULL && pContainingBuffer != NULL)
+ {
+ *pContainingBuffer = m_pReadBuffer;
+ }
+
+ // Make sure pContainingBuffer is properly set.
+ _ASSERTE((pNext == NULL) || (pNext != NULL && pContainingBuffer == NULL) || (pNext != NULL && *pContainingBuffer == m_pReadBuffer));
+ return pNext;
+}
+
+EventPipeEventInstance* EventPipeBufferList::PopNextEvent(LARGE_INTEGER beforeTimeStamp)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Get the next event.
+ EventPipeBuffer *pContainingBuffer = NULL;
+ EventPipeEventInstance *pNext = PeekNextEvent(beforeTimeStamp, &pContainingBuffer);
+
+ // If the event is non-NULL, pop it.
+ if(pNext != NULL && pContainingBuffer != NULL)
+ {
+ pContainingBuffer->PopNext(beforeTimeStamp);
+
+ // If the buffer is not the last buffer in the list and it has been drained, de-allocate it.
+ if((pContainingBuffer->GetNext() != NULL) && (pContainingBuffer->PeekNext(beforeTimeStamp) == NULL))
+ {
+ // This buffer must be the head node of the list.
+ _ASSERTE(pContainingBuffer->GetPrevious() == NULL);
+ EventPipeBuffer *pRemoved = GetAndRemoveHead();
+ _ASSERTE(pRemoved == pContainingBuffer);
+
+ // De-allocate the buffer.
+ m_pManager->DeAllocateBuffer(pRemoved);
+
+            // Reset the read buffer so that it becomes the head node on the next peek or pop operation.
+ m_pReadBuffer = NULL;
+ }
+ }
+
+ return pNext;
+}
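+
+// Illustrative sketch (comment only, not part of this change; pList and pFile are
+// hypothetical locals): callers drain a list with the peek/write/pop pattern used
+// by WriteAllBuffersToFile, because popping the last event in a buffer can
+// de-allocate that buffer and invalidate the instance pointer:
+//
+//     EventPipeBuffer *pContainingBuffer = NULL;
+//     EventPipeEventInstance *pInstance = NULL;
+//     while((pInstance = pList->PeekNextEvent(stopTimeStamp, &pContainingBuffer)) != NULL)
+//     {
+//         pFile->WriteEvent(*pInstance);      // consume the event first...
+//         pList->PopNextEvent(stopTimeStamp); // ...because the pop may free its buffer
+//     }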
+
+bool EventPipeBufferList::OwnedByThread()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_ownedByThread;
+}
+
+void EventPipeBufferList::SetOwnedByThread(bool value)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_ownedByThread = value;
+}
+
+#ifdef _DEBUG
+Thread* EventPipeBufferList::GetThread()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pCreatingThread;
+}
+
+bool EventPipeBufferList::EnsureConsistency()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Either the head and tail nodes are both NULL or both are non-NULL.
+ _ASSERTE((m_pHeadBuffer == NULL && m_pTailBuffer == NULL) || (m_pHeadBuffer != NULL && m_pTailBuffer != NULL));
+
+ // If the list is NULL, check the count and return.
+ if(m_pHeadBuffer == NULL)
+ {
+ _ASSERTE(m_bufferCount == 0);
+ return true;
+ }
+
+ // If the list is non-NULL, walk the list forward until we get to the end.
+ unsigned int nodeCount = (m_pHeadBuffer != NULL) ? 1 : 0;
+ EventPipeBuffer *pIter = m_pHeadBuffer;
+ while(pIter->GetNext() != NULL)
+ {
+ pIter = pIter->GetNext();
+ nodeCount++;
+
+ // Check for consistency of the buffer itself.
+ // NOTE: We can't check the last buffer because the owning thread could
+ // be writing to it, which could result in false asserts.
+ if(pIter->GetNext() != NULL)
+ {
+ _ASSERTE(pIter->EnsureConsistency());
+ }
+
+ // Check for cycles.
+ _ASSERTE(nodeCount <= m_bufferCount);
+ }
+
+ // When we're done with the walk, pIter must point to the tail node.
+ _ASSERTE(pIter == m_pTailBuffer);
+
+ // Node count must equal the buffer count.
+ _ASSERTE(nodeCount == m_bufferCount);
+
+ // Now, walk the list in reverse.
+ pIter = m_pTailBuffer;
+ nodeCount = (m_pTailBuffer != NULL) ? 1 : 0;
+ while(pIter->GetPrevious() != NULL)
+ {
+ pIter = pIter->GetPrevious();
+ nodeCount++;
+
+ // Check for cycles.
+ _ASSERTE(nodeCount <= m_bufferCount);
+ }
+
+ // When we're done with the reverse walk, pIter must point to the head node.
+ _ASSERTE(pIter == m_pHeadBuffer);
+
+ // Node count must equal the buffer count.
+ _ASSERTE(nodeCount == m_bufferCount);
+
+ // We're done.
+ return true;
+}
+#endif // _DEBUG
+
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/eventpipebuffermanager.h b/src/vm/eventpipebuffermanager.h
new file mode 100644
index 0000000000..a53721b7b8
--- /dev/null
+++ b/src/vm/eventpipebuffermanager.h
@@ -0,0 +1,161 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __EVENTPIPE_BUFFERMANAGER_H__
+#define __EVENTPIPE_BUFFERMANAGER_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "eventpipefile.h"
+#include "eventpipebuffer.h"
+#include "spinlock.h"
+
+class EventPipeBufferList;
+
+class EventPipeBufferManager
+{
+
+ // Declare friends.
+ friend class EventPipeBufferList;
+
+private:
+
+ // A list of linked-lists of buffer objects.
+ // Each entry in this list represents a set of buffers owned by a single thread.
+ // The actual Thread object has a pointer to the object contained in this list. This ensures that
+    // each thread can access its own list while also ensuring that when a thread
+    // is destroyed, we keep its buffers around without any migration or book-keeping.
+ SList<SListElem<EventPipeBufferList*>> *m_pPerThreadBufferList;
+
+ // The total allocation size of buffers under management.
+ size_t m_sizeOfAllBuffers;
+
+ // Lock to protect access to the per-thread buffer list and total allocation size.
+ SpinLock m_lock;
+
+#ifdef _DEBUG
+ // For debugging purposes.
+ unsigned int m_numBuffersAllocated;
+ unsigned int m_numBuffersStolen;
+ unsigned int m_numBuffersLeaked;
+ Volatile<LONG> m_numEventsStored;
+ LONG m_numEventsWritten;
+#endif // _DEBUG
+
+ // Allocate a new buffer for the specified thread.
+    // This function stores the buffer in the thread's buffer list for future use and also returns it.
+ // A NULL return value means that a buffer could not be allocated.
+ EventPipeBuffer* AllocateBufferForThread(Thread *pThread, unsigned int requestSize);
+
+ // Add a buffer to the thread buffer list.
+ void AddBufferToThreadBufferList(EventPipeBufferList *pThreadBuffers, EventPipeBuffer *pBuffer);
+
+ // Find the thread that owns the oldest buffer that is eligible to be stolen.
+ EventPipeBufferList* FindThreadToStealFrom();
+
+ // De-allocates the input buffer.
+ void DeAllocateBuffer(EventPipeBuffer *pBuffer);
+
+public:
+
+ EventPipeBufferManager();
+
+ // Write an event to the input thread's current event buffer.
+    // An optional eventThread can be provided for sample profiler events,
+    // because the thread that writes the event is not the "event thread" that the event describes.
+    // An optional stack trace can be provided for sample profiler events.
+ // Otherwise, if a stack trace is needed, one will be automatically collected.
+ bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread = NULL, StackContents *pStack = NULL);
+
+ // Write the contents of the managed buffers to the specified file.
+ // The stopTimeStamp is used to determine when tracing was stopped to ensure that we
+ // skip any events that might be partially written due to races when tracing is stopped.
+ void WriteAllBuffersToFile(EventPipeFile *pFile, LARGE_INTEGER stopTimeStamp);
+
+ // Attempt to de-allocate resources as best we can. It is possible for some buffers to leak because
+ // threads can be in the middle of a write operation and get blocked, and we may not get an opportunity
+ // to free their buffer for a very long time.
+ void DeAllocateBuffers();
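+
+    // Illustrative sketch (comment only, not part of this change; pBufferManager,
+    // pFile, and stopTimeStamp are hypothetical locals): a typical stop-tracing
+    // sequence drains the buffers to the file and then frees them:
+    //
+    //     LARGE_INTEGER stopTimeStamp;
+    //     QueryPerformanceCounter(&stopTimeStamp);
+    //     pBufferManager->WriteAllBuffersToFile(pFile, stopTimeStamp);
+    //     pBufferManager->DeAllocateBuffers();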
+
+#ifdef _DEBUG
+ bool EnsureConsistency();
+#endif // _DEBUG
+};
+
+// Represents a list of buffers associated with a specific thread.
+class EventPipeBufferList
+{
+private:
+
+ // The buffer manager that owns this list.
+ EventPipeBufferManager *m_pManager;
+
+ // Buffers are stored in an intrusive linked-list from oldest to newest.
+ // Head is the oldest buffer. Tail is the newest (and currently used) buffer.
+ EventPipeBuffer *m_pHeadBuffer;
+ EventPipeBuffer *m_pTailBuffer;
+
+ // The number of buffers in the list.
+ unsigned int m_bufferCount;
+
+ // The current read buffer (used when processing events on tracing stop).
+ EventPipeBuffer *m_pReadBuffer;
+
+    // True if this list is owned by a thread.
+    // If it is false, then this list can be de-allocated after it is drained.
+ Volatile<bool> m_ownedByThread;
+
+#ifdef _DEBUG
+ // For diagnostics, keep the thread pointer.
+ Thread *m_pCreatingThread;
+#endif // _DEBUG
+
+public:
+
+ EventPipeBufferList(EventPipeBufferManager *pManager);
+
+ // Get the head node of the list.
+ EventPipeBuffer* GetHead();
+
+ // Get the tail node of the list.
+ EventPipeBuffer* GetTail();
+
+ // Insert a new buffer at the tail of the list.
+ void InsertTail(EventPipeBuffer *pBuffer);
+
+ // Remove the head node of the list.
+ EventPipeBuffer* GetAndRemoveHead();
+
+ // Get the count of buffers in the list.
+ unsigned int GetCount() const;
+
+ // Get the next event as long as it is before the specified timestamp.
+ EventPipeEventInstance* PeekNextEvent(LARGE_INTEGER beforeTimeStamp, EventPipeBuffer **pContainingBuffer);
+
+ // Get the next event as long as it is before the specified timestamp, and also mark it as read.
+ EventPipeEventInstance* PopNextEvent(LARGE_INTEGER beforeTimeStamp);
+
+ // True if a thread owns this list.
+ bool OwnedByThread();
+
+ // Set whether or not this list is owned by a thread.
+ // If it is not owned by a thread, then it can be de-allocated
+ // after the buffer is drained.
+ // The default value is true.
+ void SetOwnedByThread(bool value);
+
+#ifdef _DEBUG
+ // Get the thread associated with this list.
+ Thread* GetThread();
+
+ // Validate the consistency of the list.
+ // This function will assert if the list is in an inconsistent state.
+ bool EnsureConsistency();
+#endif // _DEBUG
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __EVENTPIPE_BUFFERMANAGER_H__
diff --git a/src/vm/eventpipeconfiguration.cpp b/src/vm/eventpipeconfiguration.cpp
new file mode 100644
index 0000000000..42f9daf528
--- /dev/null
+++ b/src/vm/eventpipeconfiguration.cpp
@@ -0,0 +1,594 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "eventpipe.h"
+#include "eventpipeconfiguration.h"
+#include "eventpipeeventinstance.h"
+#include "eventpipeprovider.h"
+
+#ifdef FEATURE_PERFTRACING
+
+// {5291C09C-2660-4D6A-83A3-C383FD020DEC}
+const GUID EventPipeConfiguration::s_configurationProviderID =
+ { 0x5291c09c, 0x2660, 0x4d6a, { 0x83, 0xa3, 0xc3, 0x83, 0xfd, 0x2, 0xd, 0xec } };
+
+EventPipeConfiguration::EventPipeConfiguration()
+{
+ STANDARD_VM_CONTRACT;
+
+ m_enabled = false;
+ m_rundownEnabled = false;
+ m_circularBufferSizeInBytes = 1024 * 1024 * 1000; // Default to 1000MB.
+ m_pEnabledProviderList = NULL;
+ m_pProviderList = new SList<SListElem<EventPipeProvider*>>();
+}
+
+EventPipeConfiguration::~EventPipeConfiguration()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_pEnabledProviderList != NULL)
+ {
+ delete(m_pEnabledProviderList);
+ m_pEnabledProviderList = NULL;
+ }
+
+ if(m_pProviderList != NULL)
+ {
+ delete(m_pProviderList);
+ m_pProviderList = NULL;
+ }
+}
+
+void EventPipeConfiguration::Initialize()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Create the configuration provider.
+ m_pConfigProvider = EventPipe::CreateProvider(s_configurationProviderID);
+
+ // Create the metadata event.
+ m_pMetadataEvent = m_pConfigProvider->AddEvent(
+ 0, /* eventID */
+ 0, /* keywords */
+ 0, /* eventVersion */
+ EventPipeEventLevel::LogAlways,
+ false); /* needStack */
+}
+
+bool EventPipeConfiguration::RegisterProvider(EventPipeProvider &provider)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Take the lock before manipulating the provider list.
+ CrstHolder _crst(EventPipe::GetLock());
+
+ // See if we've already registered this provider.
+ EventPipeProvider *pExistingProvider = GetProviderNoLock(provider.GetProviderID());
+ if(pExistingProvider != NULL)
+ {
+ return false;
+ }
+
+ // The provider has not been registered, so register it.
+ m_pProviderList->InsertTail(new SListElem<EventPipeProvider*>(&provider));
+
+    // If tracing was enabled before this provider was registered, apply the known configuration and enable it now.
+ if(m_pEnabledProviderList != NULL)
+ {
+ EventPipeEnabledProvider *pEnabledProvider = m_pEnabledProviderList->GetEnabledProvider(&provider);
+ if(pEnabledProvider != NULL)
+ {
+ provider.SetConfiguration(
+ true /* providerEnabled */,
+ pEnabledProvider->GetKeywords(),
+ pEnabledProvider->GetLevel());
+ }
+ }
+
+ return true;
+}
+
+bool EventPipeConfiguration::UnregisterProvider(EventPipeProvider &provider)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Take the lock before manipulating the provider list.
+ CrstHolder _crst(EventPipe::GetLock());
+
+ // Find the provider.
+ SListElem<EventPipeProvider*> *pElem = m_pProviderList->GetHead();
+ while(pElem != NULL)
+ {
+ if(pElem->GetValue() == &provider)
+ {
+ break;
+ }
+
+ pElem = m_pProviderList->GetNext(pElem);
+ }
+
+ // If we found the provider, remove it.
+ if(pElem != NULL)
+ {
+ if(m_pProviderList->FindAndRemove(pElem) != NULL)
+ {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+EventPipeProvider* EventPipeConfiguration::GetProvider(const GUID &providerID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Take the lock before touching the provider list to ensure no one tries to
+ // modify the list.
+ CrstHolder _crst(EventPipe::GetLock());
+
+ return GetProviderNoLock(providerID);
+}
+
+EventPipeProvider* EventPipeConfiguration::GetProviderNoLock(const GUID &providerID)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ SListElem<EventPipeProvider*> *pElem = m_pProviderList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeProvider *pProvider = pElem->GetValue();
+ if(pProvider->GetProviderID() == providerID)
+ {
+ return pProvider;
+ }
+
+ pElem = m_pProviderList->GetNext(pElem);
+ }
+
+ return NULL;
+}
+
+size_t EventPipeConfiguration::GetCircularBufferSize() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_circularBufferSizeInBytes;
+}
+
+void EventPipeConfiguration::SetCircularBufferSize(size_t circularBufferSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if(!m_enabled)
+ {
+ m_circularBufferSizeInBytes = circularBufferSize;
+ }
+}
+
+void EventPipeConfiguration::Enable(
+ uint circularBufferSizeInMB,
+ EventPipeProviderConfiguration *pProviders,
+ int numProviders)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ // Lock must be held by EventPipe::Enable.
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ m_circularBufferSizeInBytes = circularBufferSizeInMB * 1024 * 1024;
+ m_pEnabledProviderList = new EventPipeEnabledProviderList(pProviders, static_cast<unsigned int>(numProviders));
+ m_enabled = true;
+
+ SListElem<EventPipeProvider*> *pElem = m_pProviderList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeProvider *pProvider = pElem->GetValue();
+
+ // Enable the provider if it has been configured.
+ EventPipeEnabledProvider *pEnabledProvider = m_pEnabledProviderList->GetEnabledProvider(pProvider);
+ if(pEnabledProvider != NULL)
+ {
+ pProvider->SetConfiguration(
+ true /* providerEnabled */,
+ pEnabledProvider->GetKeywords(),
+ pEnabledProvider->GetLevel());
+ }
+
+ pElem = m_pProviderList->GetNext(pElem);
+ }
+}
+
+void EventPipeConfiguration::Disable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ // Lock must be held by EventPipe::Disable.
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ SListElem<EventPipeProvider*> *pElem = m_pProviderList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeProvider *pProvider = pElem->GetValue();
+ pProvider->SetConfiguration(false /* providerEnabled */, 0 /* keywords */, EventPipeEventLevel::Critical /* level */);
+
+ pElem = m_pProviderList->GetNext(pElem);
+ }
+
+ m_enabled = false;
+ m_rundownEnabled = false;
+
+ // Free the enabled providers list.
+ if(m_pEnabledProviderList != NULL)
+ {
+ delete(m_pEnabledProviderList);
+ m_pEnabledProviderList = NULL;
+ }
+}
+
+bool EventPipeConfiguration::Enabled() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_enabled;
+}
+
+bool EventPipeConfiguration::RundownEnabled() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_rundownEnabled;
+}
+
+void EventPipeConfiguration::EnableRundown()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ // Lock must be held by EventPipe::Disable.
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ // Build the rundown configuration.
+ _ASSERTE(m_pEnabledProviderList == NULL);
+ const unsigned int numRundownProviders = 2;
+ EventPipeProviderConfiguration rundownProviders[numRundownProviders];
+ rundownProviders[0] = EventPipeProviderConfiguration(W("e13c0d23-ccbc-4e12-931b-d9cc2eee27e4"), 0x80020138, static_cast<unsigned int>(EventPipeEventLevel::Verbose)); // Public provider.
+ rundownProviders[1] = EventPipeProviderConfiguration(W("a669021c-c450-4609-a035-5af59af4df18"), 0x80020138, static_cast<unsigned int>(EventPipeEventLevel::Verbose)); // Rundown provider.
+
+ // Enable rundown.
+ m_rundownEnabled = true;
+
+ // Enable tracing. The circular buffer size doesn't matter because we're going to write all events synchronously during rundown.
+ Enable(1 /* circularBufferSizeInMB */, rundownProviders, numRundownProviders);
+}
+
+EventPipeEventInstance* EventPipeConfiguration::BuildEventMetadataEvent(EventPipeEventInstance &sourceInstance)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // The payload of the event should contain:
+ // - GUID ProviderID.
+ // - unsigned int EventID.
+ // - unsigned int EventVersion.
+ // - Optional event description payload.
+
+ // Calculate the size of the event.
+ EventPipeEvent &sourceEvent = *sourceInstance.GetEvent();
+ const GUID &providerID = sourceEvent.GetProvider()->GetProviderID();
+ unsigned int eventID = sourceEvent.GetEventID();
+ unsigned int eventVersion = sourceEvent.GetEventVersion();
+ BYTE *pPayloadData = sourceEvent.GetMetadata();
+ unsigned int payloadLength = sourceEvent.GetMetadataLength();
+ unsigned int instancePayloadSize = sizeof(providerID) + sizeof(eventID) + sizeof(eventVersion) + sizeof(payloadLength) + payloadLength;
+
+ // Allocate the payload.
+ BYTE *pInstancePayload = new BYTE[instancePayloadSize];
+
+ // Fill the buffer with the payload.
+ BYTE *currentPtr = pInstancePayload;
+
+ // Write the provider ID.
+ memcpy(currentPtr, (BYTE*)&providerID, sizeof(providerID));
+ currentPtr += sizeof(providerID);
+
+ // Write the event ID.
+ memcpy(currentPtr, &eventID, sizeof(eventID));
+ currentPtr += sizeof(eventID);
+
+ // Write the event version.
+ memcpy(currentPtr, &eventVersion, sizeof(eventVersion));
+ currentPtr += sizeof(eventVersion);
+
+ // Write the size of the metadata.
+ memcpy(currentPtr, &payloadLength, sizeof(payloadLength));
+ currentPtr += sizeof(payloadLength);
+
+ // Write the incoming payload data.
+ memcpy(currentPtr, pPayloadData, payloadLength);
+
+ // Construct the event instance.
+ EventPipeEventInstance *pInstance = new EventPipeEventInstance(
+ *m_pMetadataEvent,
+ GetCurrentThreadId(),
+ pInstancePayload,
+ instancePayloadSize,
+ NULL /* pActivityId */,
+ NULL /* pRelatedActivityId */);
+
+ // Set the timestamp to match the source event, because the metadata event
+ // will be emitted right before the source event.
+ pInstance->SetTimeStamp(sourceInstance.GetTimeStamp());
+
+ return pInstance;
+}
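+
+// Illustrative sketch (comment only, not part of this change; pPayload is a
+// hypothetical pointer to the payload built above): a reader consumes the fields
+// in the same order they were written:
+//
+//     const BYTE *p = pPayload;
+//     GUID providerID;
+//     memcpy(&providerID, p, sizeof(providerID));         p += sizeof(providerID);
+//     unsigned int eventID;
+//     memcpy(&eventID, p, sizeof(eventID));               p += sizeof(eventID);
+//     unsigned int eventVersion;
+//     memcpy(&eventVersion, p, sizeof(eventVersion));     p += sizeof(eventVersion);
+//     unsigned int metadataLength;
+//     memcpy(&metadataLength, p, sizeof(metadataLength)); p += sizeof(metadataLength);
+//     // The next metadataLength bytes are the optional event description payload.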
+
+void EventPipeConfiguration::DeleteDeferredProviders()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ // Lock must be held by EventPipe::Disable.
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
+    }
+ CONTRACTL_END;
+
+ SListElem<EventPipeProvider*> *pElem = m_pProviderList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeProvider *pProvider = pElem->GetValue();
+ if(pProvider->GetDeleteDeferred())
+ {
+ // The act of deleting the provider unregisters it and removes it from the list.
+ delete(pProvider);
+ }
+
+ pElem = m_pProviderList->GetNext(pElem);
+ }
+}
+
+EventPipeEnabledProviderList::EventPipeEnabledProviderList(
+ EventPipeProviderConfiguration *pConfigs,
+ unsigned int numConfigs)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pProviders = NULL;
+ m_pCatchAllProvider = NULL;
+ m_numProviders = 0;
+
+ // Test COMPLUS variable to enable tracing at start-up.
+    // If tracing is enabled at start-up, create the catch-all provider and always return it.
+ if((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_PerformanceTracing) & 1) == 1)
+ {
+ m_pCatchAllProvider = new EventPipeEnabledProvider();
+ m_pCatchAllProvider->Set(NULL, 0xFFFFFFFFFFFFFFFF, EventPipeEventLevel::Verbose);
+ return;
+ }
+
+ m_numProviders = numConfigs;
+ if(m_numProviders == 0)
+ {
+ return;
+ }
+
+ m_pProviders = new EventPipeEnabledProvider[m_numProviders];
+    for(unsigned int i=0; i<m_numProviders; i++)
+ {
+ m_pProviders[i].Set(
+ pConfigs[i].GetProviderName(),
+ pConfigs[i].GetKeywords(),
+ (EventPipeEventLevel)pConfigs[i].GetLevel());
+ }
+}
+
+EventPipeEnabledProviderList::~EventPipeEnabledProviderList()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_pProviders != NULL)
+ {
+ delete[] m_pProviders;
+ m_pProviders = NULL;
+ }
+ if(m_pCatchAllProvider != NULL)
+ {
+ delete(m_pCatchAllProvider);
+ m_pCatchAllProvider = NULL;
+ }
+}
+
+EventPipeEnabledProvider* EventPipeEnabledProviderList::GetEnabledProvider(
+ EventPipeProvider *pProvider)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // If tracing was enabled on start-up, all events should be on (this is a diagnostic config).
+ if(m_pCatchAllProvider != NULL)
+ {
+ return m_pCatchAllProvider;
+ }
+
+ if(m_pProviders == NULL)
+ {
+ return NULL;
+ }
+
+ // TEMPORARY: Convert the provider GUID to a string.
+ const unsigned int guidSize = 39;
+ WCHAR wszProviderID[guidSize];
+ if(!StringFromGUID2(pProvider->GetProviderID(), wszProviderID, guidSize))
+ {
+ wszProviderID[0] = '\0';
+ }
+
+ // Strip off the {}.
+ SString providerNameStr(&wszProviderID[1], guidSize-3);
+ LPCWSTR providerName = providerNameStr.GetUnicode();
+
+ EventPipeEnabledProvider *pEnabledProvider = NULL;
+    for(unsigned int i=0; i<m_numProviders; i++)
+ {
+ EventPipeEnabledProvider *pCandidate = &m_pProviders[i];
+ if(pCandidate != NULL)
+ {
+ if(wcscmp(providerName, pCandidate->GetProviderName()) == 0)
+ {
+ pEnabledProvider = pCandidate;
+ break;
+ }
+ }
+ }
+
+ return pEnabledProvider;
+}
+
+EventPipeEnabledProvider::EventPipeEnabledProvider()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pProviderName = NULL;
+ m_keywords = 0;
+}
+
+EventPipeEnabledProvider::~EventPipeEnabledProvider()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_pProviderName != NULL)
+ {
+ delete[] m_pProviderName;
+ m_pProviderName = NULL;
+ }
+}
+
+void EventPipeEnabledProvider::Set(LPCWSTR providerName, UINT64 keywords, EventPipeEventLevel loggingLevel)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_pProviderName != NULL)
+ {
+        delete[] m_pProviderName;
+ m_pProviderName = NULL;
+ }
+
+ if(providerName != NULL)
+ {
+ unsigned int bufSize = wcslen(providerName) + 1;
+ m_pProviderName = new WCHAR[bufSize];
+ wcscpy_s(m_pProviderName, bufSize, providerName);
+ }
+ m_keywords = keywords;
+ m_loggingLevel = loggingLevel;
+}
+
+LPCWSTR EventPipeEnabledProvider::GetProviderName() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pProviderName;
+}
+
+UINT64 EventPipeEnabledProvider::GetKeywords() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_keywords;
+}
+
+EventPipeEventLevel EventPipeEnabledProvider::GetLevel() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_loggingLevel;
+}
+
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/eventpipeconfiguration.h b/src/vm/eventpipeconfiguration.h
new file mode 100644
index 0000000000..de8e79d2f3
--- /dev/null
+++ b/src/vm/eventpipeconfiguration.h
@@ -0,0 +1,164 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __EVENTPIPE_CONFIGURATION_H__
+#define __EVENTPIPE_CONFIGURATION_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "slist.h"
+
+class EventPipeEnabledProvider;
+class EventPipeEnabledProviderList;
+class EventPipeEvent;
+class EventPipeEventInstance;
+class EventPipeProvider;
+struct EventPipeProviderConfiguration;
+
+enum class EventPipeEventLevel
+{
+ LogAlways,
+ Critical,
+ Error,
+ Warning,
+ Informational,
+ Verbose
+};
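+
+// Note (illustrative, not part of this change): the declaration order above mirrors
+// the standard ETW trace levels, so LogAlways has the lowest numeric value and
+// Verbose the highest. EventPipeProvider::EventEnabled relies on this ordering,
+// e.g. a provider enabled at Informational passes Critical..Informational events
+// via (m_providerLevel >= eventLevel) and rejects Verbose events.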
+
+class EventPipeConfiguration
+{
+public:
+
+ EventPipeConfiguration();
+ ~EventPipeConfiguration();
+
+ // Perform initialization that cannot be performed in the constructor.
+ void Initialize();
+
+ // Register a provider.
+ bool RegisterProvider(EventPipeProvider &provider);
+
+ // Unregister a provider.
+ bool UnregisterProvider(EventPipeProvider &provider);
+
+ // Get the provider with the specified provider ID if it exists.
+ EventPipeProvider* GetProvider(const GUID &providerID);
+
+ // Get the configured size of the circular buffer.
+ size_t GetCircularBufferSize() const;
+
+ // Set the configured size of the circular buffer.
+ void SetCircularBufferSize(size_t circularBufferSize);
+
+ // Enable the event pipe.
+ void Enable(
+ uint circularBufferSizeInMB,
+ EventPipeProviderConfiguration *pProviders,
+ int numProviders);
+
+ // Disable the event pipe.
+ void Disable();
+
+ // Get the status of the event pipe.
+ bool Enabled() const;
+
+ // Determine if rundown is enabled.
+ bool RundownEnabled() const;
+
+ // Enable the well-defined symbolic rundown configuration.
+ void EnableRundown();
+
+    // Build an instance of the metadata event that describes the specified source event instance.
+ EventPipeEventInstance* BuildEventMetadataEvent(EventPipeEventInstance &sourceInstance);
+
+ // Delete deferred providers.
+ void DeleteDeferredProviders();
+
+private:
+
+ // Get the provider without taking the lock.
+ EventPipeProvider* GetProviderNoLock(const GUID &providerID);
+
+ // Determines whether or not the event pipe is enabled.
+ Volatile<bool> m_enabled;
+
+ // The configured size of the circular buffer.
+ size_t m_circularBufferSizeInBytes;
+
+ // EventPipeConfiguration only supports a single session.
+ // This is the set of configurations for each enabled provider.
+ EventPipeEnabledProviderList *m_pEnabledProviderList;
+
+ // The list of event pipe providers.
+ SList<SListElem<EventPipeProvider*>> *m_pProviderList;
+
+ // The provider used to write configuration events to the event stream.
+ EventPipeProvider *m_pConfigProvider;
+
+ // The event used to write event information to the event stream.
+ EventPipeEvent *m_pMetadataEvent;
+
+ // The provider ID for the configuration event pipe provider.
+ // This provider is used to emit configuration events.
+ static const GUID s_configurationProviderID;
+
+ // True if rundown is enabled.
+ Volatile<bool> m_rundownEnabled;
+};
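+
+// Illustrative lifecycle sketch (comment only, not part of this change; pProviders
+// and numProviders are hypothetical, and the caller must honor the locking
+// preconditions documented on Enable/Disable):
+//
+//     EventPipeConfiguration *pConfig = new EventPipeConfiguration();
+//     pConfig->Initialize();  // creates the configuration provider and metadata event
+//     pConfig->Enable(1024 /* circularBufferSizeInMB */, pProviders, numProviders);
+//     // ... events are written while enabled ...
+//     pConfig->Disable();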
+
+class EventPipeEnabledProviderList
+{
+
+private:
+
+ // The number of providers in the list.
+ unsigned int m_numProviders;
+
+ // The list of providers.
+ EventPipeEnabledProvider *m_pProviders;
+
+ // A catch-all provider used when tracing is enabled at start-up
+ // under (COMPlus_PerformanceTracing & 1) == 1.
+ EventPipeEnabledProvider *m_pCatchAllProvider;
+
+public:
+
+ // Create a new list based on the input.
+ EventPipeEnabledProviderList(EventPipeProviderConfiguration *pConfigs, unsigned int numConfigs);
+ ~EventPipeEnabledProviderList();
+
+ // Get the enabled provider for the specified provider.
+ // Return NULL if one doesn't exist.
+ EventPipeEnabledProvider* GetEnabledProvider(EventPipeProvider *pProvider);
+};
+
+class EventPipeEnabledProvider
+{
+private:
+
+ // The provider name.
+ WCHAR *m_pProviderName;
+
+ // The enabled keywords.
+ UINT64 m_keywords;
+
+    // The logging level.
+ EventPipeEventLevel m_loggingLevel;
+
+public:
+
+ EventPipeEnabledProvider();
+ ~EventPipeEnabledProvider();
+
+ void Set(LPCWSTR providerName, UINT64 keywords, EventPipeEventLevel loggingLevel);
+
+ LPCWSTR GetProviderName() const;
+
+ UINT64 GetKeywords() const;
+
+ EventPipeEventLevel GetLevel() const;
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __EVENTPIPE_CONFIGURATION_H__
diff --git a/src/vm/eventpipeevent.cpp b/src/vm/eventpipeevent.cpp
new file mode 100644
index 0000000000..abf942b253
--- /dev/null
+++ b/src/vm/eventpipeevent.cpp
@@ -0,0 +1,120 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "eventpipeevent.h"
+#include "eventpipeprovider.h"
+
+#ifdef FEATURE_PERFTRACING
+
+EventPipeEvent::EventPipeEvent(EventPipeProvider &provider, INT64 keywords, unsigned int eventID, unsigned int eventVersion, EventPipeEventLevel level, bool needStack, BYTE *pMetadata, unsigned int metadataLength)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pProvider = &provider;
+ m_keywords = keywords;
+ m_eventID = eventID;
+ m_eventVersion = eventVersion;
+ m_level = level;
+ m_needStack = needStack;
+ m_enabled = false;
+ if (pMetadata != NULL)
+ {
+ m_pMetadata = new BYTE[metadataLength];
+ memcpy(m_pMetadata, pMetadata, metadataLength);
+ m_metadataLength = metadataLength;
+ }
+ else
+ {
+ m_pMetadata = NULL;
+ m_metadataLength = 0;
+ }
+}
+
+EventPipeEvent::~EventPipeEvent()
+{
+ if (m_pMetadata != NULL)
+ {
+ delete[] m_pMetadata;
+ m_pMetadata = NULL;
+ }
+}
+
+EventPipeProvider* EventPipeEvent::GetProvider() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pProvider;
+}
+
+INT64 EventPipeEvent::GetKeywords() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_keywords;
+}
+
+unsigned int EventPipeEvent::GetEventID() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_eventID;
+}
+
+unsigned int EventPipeEvent::GetEventVersion() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_eventVersion;
+}
+
+EventPipeEventLevel EventPipeEvent::GetLevel() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_level;
+}
+
+bool EventPipeEvent::NeedStack() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_needStack;
+}
+
+bool EventPipeEvent::IsEnabled() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_enabled;
+}
+
+BYTE *EventPipeEvent::GetMetadata() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pMetadata;
+}
+
+unsigned int EventPipeEvent::GetMetadataLength() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_metadataLength;
+}
+
+void EventPipeEvent::RefreshState()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_enabled = m_pProvider->EventEnabled(m_keywords, m_level);
+}
+
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/eventpipeevent.h b/src/vm/eventpipeevent.h
new file mode 100644
index 0000000000..c91c4bac8e
--- /dev/null
+++ b/src/vm/eventpipeevent.h
@@ -0,0 +1,87 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __EVENTPIPE_EVENT_H__
+#define __EVENTPIPE_EVENT_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "eventpipeprovider.h"
+
+class EventPipeEvent
+{
+ // Declare friends.
+ friend class EventPipeProvider;
+
+private:
+
+ // The provider that contains the event.
+ EventPipeProvider *m_pProvider;
+
+ // Bit vector containing the keywords that enable the event.
+ INT64 m_keywords;
+
+ // The ID (within the provider) of the event.
+ unsigned int m_eventID;
+
+ // The version of the event.
+ unsigned int m_eventVersion;
+
+ // The verbosity of the event.
+ EventPipeEventLevel m_level;
+
+ // True if a call stack should be captured when writing the event.
+ bool m_needStack;
+
+    // True if the event is currently enabled.
+ Volatile<bool> m_enabled;
+
+ // Metadata
+ BYTE *m_pMetadata;
+
+    // Metadata length.
+ unsigned int m_metadataLength;
+
+ // Refreshes the runtime state for this event.
+ // Called by EventPipeProvider when the provider configuration changes.
+ void RefreshState();
+
+ // Only EventPipeProvider can create events.
+ // The provider is responsible for allocating and freeing events.
+ EventPipeEvent(EventPipeProvider &provider, INT64 keywords, unsigned int eventID, unsigned int eventVersion, EventPipeEventLevel level, bool needStack, BYTE *pMetadata = NULL, unsigned int metadataLength = 0);
+
+ public:
+ ~EventPipeEvent();
+
+ // Get the provider associated with this event.
+ EventPipeProvider* GetProvider() const;
+
+ // Get the keywords that enable the event.
+ INT64 GetKeywords() const;
+
+ // Get the ID (within the provider) of the event.
+ unsigned int GetEventID() const;
+
+ // Get the version of the event.
+ unsigned int GetEventVersion() const;
+
+ // Get the verbosity of the event.
+ EventPipeEventLevel GetLevel() const;
+
+ // True if a call stack should be captured when writing the event.
+ bool NeedStack() const;
+
+ // True if the event is currently enabled.
+ bool IsEnabled() const;
+
+ // Get metadata
+ BYTE *GetMetadata() const;
+
+ // Get metadata length
+ unsigned int GetMetadataLength() const;
+};
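+
+// Illustrative usage sketch (comment only, not part of this change): assuming a
+// registered provider and the defaulted metadata parameters on AddEvent, a caller
+// defines an event once and checks its cached enabled state on the hot path:
+//
+//     EventPipeEvent *pMyEvent = pProvider->AddEvent(
+//         1 /* eventID */, 0x1 /* keywords */, 1 /* eventVersion */,
+//         EventPipeEventLevel::Informational);
+//     if(pMyEvent->IsEnabled())
+//     {
+//         // Write the payload through the EventPipe writing path.
+//     }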
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __EVENTPIPE_EVENT_H__
diff --git a/src/vm/eventpipeeventinstance.cpp b/src/vm/eventpipeeventinstance.cpp
new file mode 100644
index 0000000000..afde2c0547
--- /dev/null
+++ b/src/vm/eventpipeeventinstance.cpp
@@ -0,0 +1,238 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "eventpipeeventinstance.h"
+#include "eventpipejsonfile.h"
+#include "fastserializer.h"
+#include "sampleprofiler.h"
+
+#ifdef FEATURE_PERFTRACING
+
+EventPipeEventInstance::EventPipeEventInstance(
+ EventPipeEvent &event,
+ DWORD threadID,
+ BYTE *pData,
+ unsigned int length,
+ LPCGUID pActivityId,
+ LPCGUID pRelatedActivityId)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ m_debugEventStart = 0xDEADBEEF;
+ m_debugEventEnd = 0xCAFEBABE;
+#endif // _DEBUG
+ m_pEvent = &event;
+ m_threadID = threadID;
+ if(pActivityId != NULL)
+ {
+ m_activityId = *pActivityId;
+ }
+ else
+ {
+ m_activityId = {0};
+ }
+ if(pRelatedActivityId != NULL)
+ {
+ m_relatedActivityId = *pRelatedActivityId;
+ }
+ else
+ {
+ m_relatedActivityId = {0};
+ }
+
+ m_pData = pData;
+ m_dataLength = length;
+ QueryPerformanceCounter(&m_timeStamp);
+
+ if(event.NeedStack())
+ {
+ EventPipe::WalkManagedStackForCurrentThread(m_stackContents);
+ }
+
+#ifdef _DEBUG
+ EnsureConsistency();
+#endif // _DEBUG
+}
+
+StackContents* EventPipeEventInstance::GetStack()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return &m_stackContents;
+}
+
+EventPipeEvent* EventPipeEventInstance::GetEvent() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pEvent;
+}
+
+LARGE_INTEGER EventPipeEventInstance::GetTimeStamp() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_timeStamp;
+}
+
+BYTE* EventPipeEventInstance::GetData() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pData;
+}
+
+unsigned int EventPipeEventInstance::GetLength() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_dataLength;
+}
+
+void EventPipeEventInstance::FastSerialize(FastSerializer *pSerializer, StreamLabel metadataLabel)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef EVENTPIPE_EVENT_MARKER
+ // Useful for diagnosing serialization bugs.
+ const unsigned int value = 0xDEADBEEF;
+ pSerializer->WriteBuffer((BYTE*)&value, sizeof(value));
+#endif
+
+ // Calculate the size of the total payload so that it can be written to the file.
+ unsigned int payloadLength =
+ sizeof(metadataLabel) +
+ sizeof(m_threadID) + // Thread ID
+ sizeof(m_timeStamp) + // TimeStamp
+ sizeof(m_activityId) + // Activity ID
+ sizeof(m_relatedActivityId) + // Related Activity ID
+ sizeof(m_dataLength) + // Data payload length
+ m_dataLength + // Event payload data
+ sizeof(unsigned int) + // Prepended stack payload size in bytes
+ m_stackContents.GetSize(); // Stack payload size
+
+ // Write the size of the event to the file.
+ pSerializer->WriteBuffer((BYTE*)&payloadLength, sizeof(payloadLength));
+
+ // Write the metadata label.
+ pSerializer->WriteBuffer((BYTE*)&metadataLabel, sizeof(metadataLabel));
+
+ // Write the thread ID.
+ pSerializer->WriteBuffer((BYTE*)&m_threadID, sizeof(m_threadID));
+
+ // Write the timestamp.
+ pSerializer->WriteBuffer((BYTE*)&m_timeStamp, sizeof(m_timeStamp));
+
+ // Write the activity id.
+ pSerializer->WriteBuffer((BYTE*)&m_activityId, sizeof(m_activityId));
+
+ // Write the related activity id.
+ pSerializer->WriteBuffer((BYTE*)&m_relatedActivityId, sizeof(m_relatedActivityId));
+
+ // Write the data payload size.
+ pSerializer->WriteBuffer((BYTE*)&m_dataLength, sizeof(m_dataLength));
+
+ // Write the event data payload.
+ if(m_dataLength > 0)
+ {
+ pSerializer->WriteBuffer(m_pData, m_dataLength);
+ }
+
+ // Write the size of the stack in bytes.
+ unsigned int stackSize = m_stackContents.GetSize();
+ pSerializer->WriteBuffer((BYTE*)&stackSize, sizeof(stackSize));
+
+ // Write the stack if present.
+ if(stackSize > 0)
+ {
+ pSerializer->WriteBuffer(m_stackContents.GetPointer(), stackSize);
+ }
+}
+
+#ifdef _DEBUG
+void EventPipeEventInstance::SerializeToJsonFile(EventPipeJsonFile *pFile)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(pFile == NULL)
+ {
+ return;
+ }
+
+ EX_TRY
+ {
+ const unsigned int guidSize = 39;
+ WCHAR wszProviderID[guidSize];
+ if(!StringFromGUID2(m_pEvent->GetProvider()->GetProviderID(), wszProviderID, guidSize))
+ {
+ wszProviderID[0] = '\0';
+ }
+
+ // Strip off the {}.
+ StackScratchBuffer scratch;
+ SString guidStr(&wszProviderID[1], guidSize-3);
+
+ SString message;
+        message.Printf("Provider=%s/EventID=%u/Version=%u", guidStr.GetANSI(scratch), m_pEvent->GetEventID(), m_pEvent->GetEventVersion());
+ pFile->WriteEvent(m_timeStamp, m_threadID, message, m_stackContents);
+ }
+ EX_CATCH{} EX_END_CATCH(SwallowAllExceptions);
+}
+#endif
+
+void EventPipeEventInstance::SetTimeStamp(LARGE_INTEGER timeStamp)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_timeStamp = timeStamp;
+}
+
+#ifdef _DEBUG
+bool EventPipeEventInstance::EnsureConsistency()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Validate event start.
+ _ASSERTE(m_debugEventStart == 0xDEADBEEF);
+
+ // Validate event end.
+ _ASSERTE(m_debugEventEnd == 0xCAFEBABE);
+
+ return true;
+}
+#endif // _DEBUG
+
+SampleProfilerEventInstance::SampleProfilerEventInstance(EventPipeEvent &event, Thread *pThread, BYTE *pData, unsigned int length)
+ :EventPipeEventInstance(event, pThread->GetOSThreadId(), pData, length, NULL /* pActivityId */, NULL /* pRelatedActivityId */)
+{
+ LIMITED_METHOD_CONTRACT;
+}
+
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/eventpipeeventinstance.h b/src/vm/eventpipeeventinstance.h
new file mode 100644
index 0000000000..4fcf95c73e
--- /dev/null
+++ b/src/vm/eventpipeeventinstance.h
@@ -0,0 +1,90 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __EVENTPIPE_EVENTINSTANCE_H__
+#define __EVENTPIPE_EVENTINSTANCE_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "eventpipe.h"
+#include "eventpipeevent.h"
+#include "fastserializableobject.h"
+#include "fastserializer.h"
+
+class EventPipeEventInstance
+{
+ // Declare friends.
+ friend EventPipeConfiguration;
+
+public:
+
+ EventPipeEventInstance(EventPipeEvent &event, DWORD threadID, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId);
+
+ // Get the event associated with this instance.
+ EventPipeEvent* GetEvent() const;
+
+ // Get the stack contents object to either read or write to it.
+ StackContents* GetStack();
+
+ // Get the timestamp.
+ LARGE_INTEGER GetTimeStamp() const;
+
+ // Get a pointer to the data payload.
+ BYTE* GetData() const;
+
+ // Get the length of the data.
+ unsigned int GetLength() const;
+
+ // Serialize this object using FastSerialization.
+ void FastSerialize(FastSerializer *pSerializer, StreamLabel metadataLabel);
+
+#ifdef _DEBUG
+ // Serialize this event to the JSON file.
+ void SerializeToJsonFile(EventPipeJsonFile *pFile);
+
+ bool EnsureConsistency();
+#endif // _DEBUG
+
+protected:
+
+#ifdef _DEBUG
+ unsigned int m_debugEventStart;
+#endif // _DEBUG
+
+ EventPipeEvent *m_pEvent;
+ DWORD m_threadID;
+ LARGE_INTEGER m_timeStamp;
+ GUID m_activityId;
+ GUID m_relatedActivityId;
+
+ BYTE *m_pData;
+ unsigned int m_dataLength;
+ StackContents m_stackContents;
+
+#ifdef _DEBUG
+ unsigned int m_debugEventEnd;
+#endif // _DEBUG
+
+private:
+
+ // This is used for metadata events by EventPipeConfiguration because
+ // the metadata event is created after the first instance of the event
+ // but must be inserted into the file before the first instance of the event.
+ void SetTimeStamp(LARGE_INTEGER timeStamp);
+};
+
+// A specific type of event instance for use by the SampleProfiler.
+// This is needed because the SampleProfiler knows how to walk stacks belonging
+// to threads other than the current thread.
+class SampleProfilerEventInstance : public EventPipeEventInstance
+{
+
+public:
+
+ SampleProfilerEventInstance(EventPipeEvent &event, Thread *pThread, BYTE *pData, unsigned int length);
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __EVENTPIPE_EVENTINSTANCE_H__
diff --git a/src/vm/eventpipefile.cpp b/src/vm/eventpipefile.cpp
new file mode 100644
index 0000000000..f574814586
--- /dev/null
+++ b/src/vm/eventpipefile.cpp
@@ -0,0 +1,164 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "eventpipebuffer.h"
+#include "eventpipeconfiguration.h"
+#include "eventpipefile.h"
+
+#ifdef FEATURE_PERFTRACING
+
+EventPipeFile::EventPipeFile(
+ SString &outputFilePath
+#ifdef _DEBUG
+ ,
+ bool lockOnWrite
+#endif // _DEBUG
+)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pSerializer = new FastSerializer(outputFilePath, *this);
+ m_serializationLock.Init(LOCK_TYPE_DEFAULT);
+ m_pMetadataLabels = new MapSHashWithRemove<EventPipeEvent*, StreamLabel>();
+
+#ifdef _DEBUG
+ m_lockOnWrite = lockOnWrite;
+#endif // _DEBUG
+
+ // File start time information.
+ GetSystemTime(&m_fileOpenSystemTime);
+ QueryPerformanceCounter(&m_fileOpenTimeStamp);
+ QueryPerformanceFrequency(&m_timeStampFrequency);
+
+    // Write a forward reference at the beginning of the file; when the file is
+    // closed, it is defined to point at the end of the event stream, which lets
+    // readers know where the event stream ends and skip it if needed.
+ m_beginEventsForwardReferenceIndex = m_pSerializer->AllocateForwardReference();
+ m_pSerializer->WriteForwardReference(m_beginEventsForwardReferenceIndex);
+
+ // Write the header information into the file.
+
+ // Write the current date and time.
+ m_pSerializer->WriteBuffer((BYTE*)&m_fileOpenSystemTime, sizeof(m_fileOpenSystemTime));
+
+ // Write FileOpenTimeStamp
+ m_pSerializer->WriteBuffer((BYTE*)&m_fileOpenTimeStamp, sizeof(m_fileOpenTimeStamp));
+
+ // Write ClockFrequency
+ m_pSerializer->WriteBuffer((BYTE*)&m_timeStampFrequency, sizeof(m_timeStampFrequency));
+}
+
+EventPipeFile::~EventPipeFile()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Mark the end of the event stream.
+ StreamLabel currentLabel = m_pSerializer->GetStreamLabel();
+
+    // Define the forward reference written in the constructor to point at the end of the event stream.
+ m_pSerializer->DefineForwardReference(m_beginEventsForwardReferenceIndex, currentLabel);
+
+ // Close the serializer.
+ if(m_pSerializer != NULL)
+ {
+ delete(m_pSerializer);
+ m_pSerializer = NULL;
+ }
+}
+
+void EventPipeFile::WriteEvent(EventPipeEventInstance &instance)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef _DEBUG
+ if(m_lockOnWrite)
+ {
+ // Take the serialization lock.
+ // This is used for synchronous file writes.
+ // The circular buffer path only writes from one thread.
+ SpinLockHolder _slh(&m_serializationLock);
+ }
+#endif // _DEBUG
+
+ // Check to see if we've seen this event type before.
+ // If not, then write the event metadata to the event stream first.
+ StreamLabel metadataLabel = GetMetadataLabel(*instance.GetEvent());
+ if(metadataLabel == 0)
+ {
+ EventPipeEventInstance* pMetadataInstance = EventPipe::GetConfiguration()->BuildEventMetadataEvent(instance);
+
+ metadataLabel = m_pSerializer->GetStreamLabel();
+ pMetadataInstance->FastSerialize(m_pSerializer, (StreamLabel)0); // 0 breaks recursion and represents the metadata event.
+
+ SaveMetadataLabel(*instance.GetEvent(), metadataLabel);
+
+ delete[] (pMetadataInstance->GetData());
+ delete (pMetadataInstance);
+ }
+
+ // Write the event to the stream.
+ instance.FastSerialize(m_pSerializer, metadataLabel);
+}
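+
+// Illustrative stream layout (comment only, not part of this change): after the
+// first write of a given event type E, the stream contains the metadata event
+// followed by the instance, and later instances point back at the saved label:
+//
+//     [metadata event for E, written with metadataLabel 0]  <-- label saved here
+//     [instance of E, references the saved label]
+//     ...
+//     [instance of E, references the saved label]           <-- metadata not rewritten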
+
+StreamLabel EventPipeFile::GetMetadataLabel(EventPipeEvent &event)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ StreamLabel outLabel;
+ if(m_pMetadataLabels->Lookup(&event, &outLabel))
+ {
+ _ASSERTE(outLabel != 0);
+ return outLabel;
+ }
+
+ return 0;
+}
+
+void EventPipeFile::SaveMetadataLabel(EventPipeEvent &event, StreamLabel label)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(label > 0);
+ }
+ CONTRACTL_END;
+
+ // If a pre-existing metadata label exists, remove it.
+ StreamLabel outLabel;
+ if(m_pMetadataLabels->Lookup(&event, &outLabel))
+ {
+ m_pMetadataLabels->Remove(&event);
+ }
+
+ // Add the metadata label.
+ m_pMetadataLabels->Add(&event, label);
+}
+
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/eventpipefile.h b/src/vm/eventpipefile.h
new file mode 100644
index 0000000000..2f6853545d
--- /dev/null
+++ b/src/vm/eventpipefile.h
@@ -0,0 +1,85 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __EVENTPIPE_FILE_H__
+#define __EVENTPIPE_FILE_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "eventpipe.h"
+#include "eventpipeeventinstance.h"
+#include "fastserializableobject.h"
+#include "fastserializer.h"
+
+class EventPipeFile : public FastSerializableObject
+{
+ public:
+
+ EventPipeFile(SString &outputFilePath
+#ifdef _DEBUG
+ ,
+ bool lockOnWrite = false
+#endif // _DEBUG
+ );
+ ~EventPipeFile();
+
+ // Write an event to the file.
+ void WriteEvent(EventPipeEventInstance &instance);
+
+ // Serialize this object.
+    // Not supported - this is the entry object for the trace,
+    // which means that its contents haven't been created yet.
+ void FastSerialize(FastSerializer *pSerializer)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(!"This function should never be called!");
+ }
+
+ // Get the type name of this object.
+ const char* GetTypeName()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return "Microsoft.DotNet.Runtime.EventPipeFile";
+ }
+
+ private:
+
+ // Get the metadata address in the file for an event.
+ // The return value can be written into the file as a back-pointer to the event metadata.
+ StreamLabel GetMetadataLabel(EventPipeEvent &event);
+
+ // Save the metadata address in the file for an event.
+ void SaveMetadataLabel(EventPipeEvent &event, StreamLabel label);
+
+ // The object responsible for serialization.
+ FastSerializer *m_pSerializer;
+
+ // The system time when the file was opened.
+ SYSTEMTIME m_fileOpenSystemTime;
+
+ // The timestamp when the file was opened. Used for calculating file-relative timestamps.
+ LARGE_INTEGER m_fileOpenTimeStamp;
+
+ // The frequency of the timestamps used for this file.
+ LARGE_INTEGER m_timeStampFrequency;
+
+ // The forward reference index that marks the beginning of the event stream.
+ unsigned int m_beginEventsForwardReferenceIndex;
+
+    // The serialization lock, which ensures that only a single event
+    // or block of events gets written to the file at once.
+ SpinLock m_serializationLock;
+
+ // Hashtable of metadata labels.
+ MapSHashWithRemove<EventPipeEvent*, StreamLabel> *m_pMetadataLabels;
+
+#ifdef _DEBUG
+ bool m_lockOnWrite;
+#endif // _DEBUG
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __EVENTPIPE_FILE_H__
diff --git a/src/vm/eventpipejsonfile.cpp b/src/vm/eventpipejsonfile.cpp
index 6353c917e7..f76959053c 100644
--- a/src/vm/eventpipejsonfile.cpp
+++ b/src/vm/eventpipejsonfile.cpp
@@ -5,6 +5,9 @@
#include "common.h"
#include "eventpipejsonfile.h"
+#ifdef _DEBUG
+#ifdef FEATURE_PERFTRACING
+
EventPipeJsonFile::EventPipeJsonFile(SString &outFilePath)
{
CONTRACTL
@@ -15,6 +18,7 @@ EventPipeJsonFile::EventPipeJsonFile(SString &outFilePath)
}
CONTRACTL_END;
+ m_writeErrorEncountered = false;
m_pFileStream = new CFileStream();
if(FAILED(m_pFileStream->OpenForWrite(outFilePath)))
{
@@ -52,7 +56,14 @@ EventPipeJsonFile::~EventPipeJsonFile()
}
}
-void EventPipeJsonFile::WriteEvent(CommonEventFields &commonFields, SString &message, StackContents &stackContents)
+void EventPipeJsonFile::WriteEvent(EventPipeEventInstance &instance)
+{
+ STANDARD_VM_CONTRACT;
+
+ instance.SerializeToJsonFile(this);
+}
+
+void EventPipeJsonFile::WriteEvent(LARGE_INTEGER timeStamp, DWORD threadID, SString &message, StackContents &stackContents)
{
STANDARD_VM_CONTRACT;
@@ -67,16 +78,16 @@ void EventPipeJsonFile::WriteEvent(CommonEventFields &commonFields, SString &mes
// Convert the timestamp from a QPC value to a trace-relative timestamp.
double millisecondsSinceTraceStart = 0.0;
- if(commonFields.TimeStamp.QuadPart != m_fileOpenTimeStamp.QuadPart)
+ if(timeStamp.QuadPart != m_fileOpenTimeStamp.QuadPart)
{
LARGE_INTEGER elapsedNanoseconds;
- elapsedNanoseconds.QuadPart = commonFields.TimeStamp.QuadPart - m_fileOpenTimeStamp.QuadPart;
+ elapsedNanoseconds.QuadPart = timeStamp.QuadPart - m_fileOpenTimeStamp.QuadPart;
millisecondsSinceTraceStart = elapsedNanoseconds.QuadPart / 1000000.0;
}
StackScratchBuffer scratch;
SString threadFrame;
- threadFrame.Printf("Thread (%d)", commonFields.ThreadID);
+ threadFrame.Printf("Thread (%d)", threadID);
SString event;
event.Printf("{\"Time\" : \"%f\", \"Metric\" : \"1\",\n\"Stack\": [\n\"%s\",\n%s\"%s\"]},", millisecondsSinceTraceStart, message.GetANSI(scratch), strCallStack.GetANSI(scratch), threadFrame.GetANSI(scratch));
Write(event);
@@ -129,3 +140,6 @@ void EventPipeJsonFile::FormatCallStack(StackContents &stackContents, SString &r
resultStr.Append(frameStr);
}
}
+
+#endif // _DEBUG
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/eventpipejsonfile.h b/src/vm/eventpipejsonfile.h
index b6e42def68..7686db7752 100644
--- a/src/vm/eventpipejsonfile.h
+++ b/src/vm/eventpipejsonfile.h
@@ -6,8 +6,12 @@
#ifndef __EVENTPIPE_JSONFILE_H__
#define __EVENTPIPE_JSONFILE_H__
+#ifdef _DEBUG
+#ifdef FEATURE_PERFTRACING
+
#include "common.h"
#include "eventpipe.h"
+#include "eventpipeeventinstance.h"
#include "fstream.h"
class EventPipeJsonFile
@@ -16,8 +20,11 @@ class EventPipeJsonFile
EventPipeJsonFile(SString &outFilePath);
~EventPipeJsonFile();
+ // Write an event instance.
+ void WriteEvent(EventPipeEventInstance &instance);
+
// Write an event with the specified message and stack.
- void WriteEvent(CommonEventFields &commonFields, SString &message, StackContents &stackContents);
+ void WriteEvent(LARGE_INTEGER timeStamp, DWORD threadID, SString &message, StackContents &stackContents);
private:
@@ -37,4 +44,7 @@ class EventPipeJsonFile
LARGE_INTEGER m_fileOpenTimeStamp;
};
+#endif // FEATURE_PERFTRACING
+#endif // _DEBUG
+
#endif // __EVENTPIPE_JSONFILE_H__
diff --git a/src/vm/eventpipeprovider.cpp b/src/vm/eventpipeprovider.cpp
new file mode 100644
index 0000000000..896f9b2650
--- /dev/null
+++ b/src/vm/eventpipeprovider.cpp
@@ -0,0 +1,244 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "eventpipe.h"
+#include "eventpipeconfiguration.h"
+#include "eventpipeevent.h"
+#include "eventpipeprovider.h"
+
+#ifdef FEATURE_PERFTRACING
+
+EventPipeProvider::EventPipeProvider(const GUID &providerID, EventPipeCallback pCallbackFunction, void *pCallbackData)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_providerID = providerID;
+ m_enabled = false;
+ m_keywords = 0;
+ m_providerLevel = EventPipeEventLevel::Critical;
+ m_pEventList = new SList<SListElem<EventPipeEvent*>>();
+ m_pCallbackFunction = pCallbackFunction;
+ m_pCallbackData = pCallbackData;
+ m_pConfig = EventPipe::GetConfiguration();
+ _ASSERTE(m_pConfig != NULL);
+
+ // Register the provider.
+ m_pConfig->RegisterProvider(*this);
+}
+
+EventPipeProvider::~EventPipeProvider()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Unregister the provider.
+ // This call is re-entrant.
+ // NOTE: We don't use the cached event pipe configuration pointer
+ // in case this runs during shutdown and the configuration has already
+ // been freed.
+ EventPipeConfiguration* pConfig = EventPipe::GetConfiguration();
+ _ASSERTE(pConfig != NULL);
+ pConfig->UnregisterProvider(*this);
+
+ // Free all of the events.
+ if(m_pEventList != NULL)
+ {
+ // Take the lock before manipulating the list.
+ CrstHolder _crst(EventPipe::GetLock());
+
+ SListElem<EventPipeEvent*> *pElem = m_pEventList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeEvent *pEvent = pElem->GetValue();
+ delete pEvent;
+
+ pElem = m_pEventList->GetNext(pElem);
+ }
+
+ delete m_pEventList;
+ m_pEventList = NULL;
+ }
+}
+
+const GUID& EventPipeProvider::GetProviderID() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return m_providerID;
+}
+
+bool EventPipeProvider::Enabled() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (m_pConfig->Enabled() && m_enabled);
+}
+
+bool EventPipeProvider::EventEnabled(INT64 keywords) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The event is enabled if:
+ // - The provider is enabled.
+ // - The event's keywords are unspecified in the manifest (== 0), or they share at least one bit with the enabled configuration.
+ return (Enabled() && ((keywords == 0) || ((m_keywords & keywords) != 0)));
+}
+
+bool EventPipeProvider::EventEnabled(INT64 keywords, EventPipeEventLevel eventLevel) const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // The event is enabled if:
+ // - The provider is enabled.
+ // - The event's keywords are unspecified in the manifest (== 0), or they share at least one bit with the enabled configuration.
+ // - The event's level is LogAlways, or the provider's verbosity level is at or above the event's level in the manifest.
+ return (EventEnabled(keywords) &&
+ ((eventLevel == EventPipeEventLevel::LogAlways) || (m_providerLevel >= eventLevel)));
+}
+
+void EventPipeProvider::SetConfiguration(bool providerEnabled, INT64 keywords, EventPipeEventLevel providerLevel)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ m_enabled = providerEnabled;
+ m_keywords = keywords;
+ m_providerLevel = providerLevel;
+
+ RefreshAllEvents();
+ InvokeCallback();
+}
+
+EventPipeEvent* EventPipeProvider::AddEvent(unsigned int eventID, INT64 keywords, unsigned int eventVersion, EventPipeEventLevel level, BYTE *pMetadata, unsigned int metadataLength)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ return AddEvent(eventID, keywords, eventVersion, level, true /* needStack */, pMetadata, metadataLength);
+}
+
+EventPipeEvent* EventPipeProvider::AddEvent(unsigned int eventID, INT64 keywords, unsigned int eventVersion, EventPipeEventLevel level, bool needStack, BYTE *pMetadata, unsigned int metadataLength)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Create the event.
+ EventPipeEvent *pEvent = new EventPipeEvent(
+ *this,
+ keywords,
+ eventID,
+ eventVersion,
+ level,
+ needStack,
+ pMetadata,
+ metadataLength);
+
+ // Add it to the list of events.
+ AddEvent(*pEvent);
+ return pEvent;
+}
+
+void EventPipeProvider::AddEvent(EventPipeEvent &event)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Take the config lock before inserting a new event.
+ CrstHolder _crst(EventPipe::GetLock());
+
+ m_pEventList->InsertTail(new SListElem<EventPipeEvent*>(&event));
+ event.RefreshState();
+}
+
+void EventPipeProvider::InvokeCallback()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ if(m_pCallbackFunction != NULL && !g_fEEShutDown)
+ {
+ (*m_pCallbackFunction)(
+ &m_providerID,
+ m_enabled,
+ (UCHAR) m_providerLevel,
+ m_keywords,
+ 0 /* matchAllKeywords */,
+ NULL /* FilterData */,
+ m_pCallbackData /* CallbackContext */);
+ }
+}
+
+bool EventPipeProvider::GetDeleteDeferred() const
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_deleteDeferred;
+}
+
+void EventPipeProvider::SetDeleteDeferred()
+{
+ LIMITED_METHOD_CONTRACT;
+ m_deleteDeferred = true;
+}
+
+void EventPipeProvider::RefreshAllEvents()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
+ }
+ CONTRACTL_END;
+
+ SListElem<EventPipeEvent*> *pElem = m_pEventList->GetHead();
+ while(pElem != NULL)
+ {
+ EventPipeEvent *pEvent = pElem->GetValue();
+ pEvent->RefreshState();
+
+ pElem = m_pEventList->GetNext(pElem);
+ }
+}
+
+#endif // FEATURE_PERFTRACING
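
A worked example of the masking rule in EventEnabled above; the keyword values are illustrative:

    // Suppose the provider is enabled with m_keywords == 0x5 (bits 0 and 2 set).
    // Event keywords 0x0 -> enabled  (unspecified in the manifest always passes)
    // Event keywords 0x4 -> enabled  (0x5 & 0x4 != 0)
    // Event keywords 0x2 -> disabled (0x5 & 0x2 == 0)
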
diff --git a/src/vm/eventpipeprovider.h b/src/vm/eventpipeprovider.h
new file mode 100644
index 0000000000..d2c459ef32
--- /dev/null
+++ b/src/vm/eventpipeprovider.h
@@ -0,0 +1,117 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __EVENTPIPE_PROVIDER_H__
+#define __EVENTPIPE_PROVIDER_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "eventpipe.h"
+#include "eventpipeconfiguration.h"
+#include "slist.h"
+
+class EventPipeEvent;
+
+// Define the event pipe callback to match the ETW callback signature.
+typedef void (*EventPipeCallback)(
+ LPCGUID SourceID,
+ ULONG IsEnabled,
+ UCHAR Level,
+ ULONGLONG MatchAnyKeywords,
+ ULONGLONG MatchAllKeywords,
+ void *FilterData,
+ void *CallbackContext);
+
+class EventPipeProvider
+{
+ // Declare friends.
+ friend class EventPipe;
+ friend class EventPipeConfiguration;
+ friend class SampleProfiler;
+
+private:
+ // The GUID of the provider.
+ GUID m_providerID;
+
+ // True if the provider is enabled.
+ bool m_enabled;
+
+ // Bit vector containing the currently enabled keywords.
+ INT64 m_keywords;
+
+ // The current verbosity of the provider.
+ EventPipeEventLevel m_providerLevel;
+
+ // List of every event currently associated with the provider.
+ // New events can be added on-the-fly.
+ SList<SListElem<EventPipeEvent*>> *m_pEventList;
+
+ // The optional provider callback.
+ EventPipeCallback m_pCallbackFunction;
+
+ // The optional provider callback data pointer.
+ void *m_pCallbackData;
+
+ // The configuration object.
+ EventPipeConfiguration *m_pConfig;
+
+ // True if the provider has been deleted, but that deletion
+ // has been deferred until tracing is stopped.
+ bool m_deleteDeferred;
+
+ // Private constructor because all providers are created through EventPipe::CreateProvider.
+ EventPipeProvider(const GUID &providerID, EventPipeCallback pCallbackFunction = NULL, void *pCallbackData = NULL);
+
+public:
+
+ ~EventPipeProvider();
+
+ // Get the provider ID.
+ const GUID& GetProviderID() const;
+
+ // Determine if the provider is enabled.
+ bool Enabled() const;
+
+ // Determine if the specified keywords are enabled.
+ bool EventEnabled(INT64 keywords) const;
+
+ // Determine if the specified keywords and level match the configuration.
+ bool EventEnabled(INT64 keywords, EventPipeEventLevel eventLevel) const;
+
+ // Create a new event.
+ EventPipeEvent* AddEvent(unsigned int eventID, INT64 keywords, unsigned int eventVersion, EventPipeEventLevel level, BYTE *pMetadata = NULL, unsigned int metadataLength = 0);
+
+ private:
+
+ // Create a new event, but allow needStack to be specified.
+ // In general, we want stack walking to be controlled by the consumer and not the producer of events.
+ // However, there are a couple of cases where we know a stack walk is unwanted because it would hurt performance significantly:
+ // 1. Sample profiler events: The sample profiler already walks the target thread's stack. Also walking the sampler thread's stack would be wasted work.
+ // 2. Metadata events: These are less costly, but since the mechanism has to exist anyway, we might as well use it for them.
+ EventPipeEvent* AddEvent(unsigned int eventID, INT64 keywords, unsigned int eventVersion, EventPipeEventLevel level, bool needStack, BYTE *pMetadata = NULL, unsigned int metadataLength = 0);
+
+ // Add an event to the provider.
+ void AddEvent(EventPipeEvent &event);
+
+ // Set the provider configuration (enable and disable sets of events).
+ // This is called by EventPipeConfiguration.
+ void SetConfiguration(bool providerEnabled, INT64 keywords, EventPipeEventLevel providerLevel);
+
+ // Refresh the runtime state of all events.
+ void RefreshAllEvents();
+
+ // Invoke the provider callback.
+ void InvokeCallback();
+
+ // Returns true if deletion of the provider has been requested but
+ // deferred until tracing stops.
+ bool GetDeleteDeferred() const;
+
+ // Defer deletion of the provider.
+ void SetDeleteDeferred();
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __EVENTPIPE_PROVIDER_H__
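
A minimal usage sketch of the API declared above. The GUID, event ID, and keyword are made up for illustration; EventPipe::CreateProvider is the creation path named in the constructor comment and used by sampleprofiler.cpp later in this diff:

    // Hypothetical provider identity.
    const GUID MyProviderID = {0x01234567, 0x89ab, 0xcdef, {0,1,2,3,4,5,6,7}};

    EventPipeProvider *pProvider = EventPipe::CreateProvider(MyProviderID);
    EventPipeEvent *pEvent = pProvider->AddEvent(
        1,    // eventID
        0x1,  // keywords: bit 0
        1,    // eventVersion
        EventPipeEventLevel::Informational);

    // A session that enables keyword bit 0 at Informational or higher makes
    // both checks true; EventEnabled applies the masking rule from
    // eventpipeprovider.cpp.
    bool fProviderOn = pProvider->Enabled();
    bool fEventOn    = pProvider->EventEnabled(0x1, EventPipeEventLevel::Informational);
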
diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp
index 1eb89385a6..84ce711c1f 100644
--- a/src/vm/eventtrace.cpp
+++ b/src/vm/eventtrace.cpp
@@ -197,7 +197,10 @@ BOOL IsRundownNgenKeywordEnabledAndNotSuppressed()
{
LIMITED_METHOD_CONTRACT;
- return
+ return
+#ifdef FEATURE_PERFTRACING
+ EventPipeHelper::Enabled() ||
+#endif // FEATURE_PERFTRACING
(
ETW_TRACING_CATEGORY_ENABLED(
MICROSOFT_WINDOWS_DOTNETRUNTIME_RUNDOWN_PROVIDER_Context,
@@ -4465,10 +4468,10 @@ extern "C"
#ifdef _TARGET_AMD64_
// We only do this on amd64 (NOT ARM, because ARM uses frame based stack crawling)
- // If we have turned on the JIT keyword to the VERBOSE setting (needed to get JIT names) then
+ // If we have turned on the JIT keyword to the INFORMATION setting (needed to get JIT names) then
// we assume that we also want good stack traces so we need to publish unwind information so
// ETW can get at it
- if(bIsPublicTraceHandle && ETW_CATEGORY_ENABLED((*context), TRACE_LEVEL_VERBOSE, CLR_RUNDOWNJIT_KEYWORD))
+ if(bIsPublicTraceHandle && ETW_CATEGORY_ENABLED((*context), TRACE_LEVEL_INFORMATION, CLR_RUNDOWNJIT_KEYWORD))
UnwindInfoTable::PublishUnwindInfo(g_fEEStarted != FALSE);
#endif
@@ -6760,9 +6763,9 @@ VOID ETW::MethodLog::SendHelperEvent(ULONGLONG ullHelperStartAddress, ULONG ulHe
ulHelperSize,
0,
methodFlags,
- NULL,
+ NULL,
pHelperName,
- NULL,
+ NULL,
GetClrInstanceId());
}
}
@@ -7385,3 +7388,12 @@ VOID ETW::EnumerationLog::EnumerationHelper(Module *moduleFilter, BaseDomain *do
}
#endif // !FEATURE_REDHAWK
+
+#ifdef FEATURE_PERFTRACING
+#include "eventpipe.h"
+bool EventPipeHelper::Enabled()
+{
+ LIMITED_METHOD_CONTRACT;
+ return EventPipe::Enabled();
+}
+#endif // FEATURE_PERFTRACING
diff --git a/src/vm/exceptionhandling.cpp b/src/vm/exceptionhandling.cpp
index 7030ef5e91..abea194b92 100644
--- a/src/vm/exceptionhandling.cpp
+++ b/src/vm/exceptionhandling.cpp
@@ -25,11 +25,15 @@
#define VSD_STUB_CAN_THROW_AV
#endif // _TARGET_ARM_ || _TARGET_X86_
+#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
+// ARM/ARM64 uses Caller-SP to locate PSPSym in the funclet frame.
+#define USE_CALLER_SP_IN_FUNCLET
+#endif // _TARGET_ARM_ || _TARGET_ARM64_
+
#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) || defined(_TARGET_X86_)
#define ADJUST_PC_UNWOUND_TO_CALL
#define STACK_RANGE_BOUNDS_ARE_CALLER_SP
#define USE_FUNCLET_CALL_HELPER
-#define USE_CALLER_SP_IN_FUNCLET
// For ARM/ARM64, EstablisherFrame is Caller-SP (SP just before executing call instruction).
// This has been confirmed by AaronGi from the kernel team for Windows.
//
diff --git a/src/vm/fastserializableobject.h b/src/vm/fastserializableobject.h
new file mode 100644
index 0000000000..cbfcfc9f0e
--- /dev/null
+++ b/src/vm/fastserializableobject.h
@@ -0,0 +1,32 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __FASTSERIALIZABLE_OBJECT_H__
+#define __FASTSERIALIZABLE_OBJECT_H__
+
+#ifdef FEATURE_PERFTRACING
+
+class FastSerializer;
+
+class FastSerializableObject
+{
+
+public:
+
+ // Virtual destructor to ensure that derived class destructors get called.
+ virtual ~FastSerializableObject()
+ {
+ LIMITED_METHOD_CONTRACT;
+ }
+
+ // Serialize the object using the specified serializer.
+ virtual void FastSerialize(FastSerializer *pSerializer) = 0;
+
+ // Get the type name for the current object.
+ virtual const char* GetTypeName() = 0;
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __FASTSERIALIZABLE_OBJECT_H__
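
A minimal sketch of a concrete serializable type. The class is hypothetical and relies only on WriteBuffer, declared in fastserializer.h below:

    class SampleCounter : public FastSerializableObject
    {
    public:
        int m_count = 0;

        // Serialize the single field through the serializer's raw buffer writer.
        virtual void FastSerialize(FastSerializer *pSerializer)
        {
            pSerializer->WriteBuffer((BYTE*)&m_count, sizeof(m_count));
        }

        virtual const char* GetTypeName()
        {
            return "SampleCounter";
        }
    };
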
diff --git a/src/vm/fastserializer.cpp b/src/vm/fastserializer.cpp
new file mode 100644
index 0000000000..7f9b4e20a6
--- /dev/null
+++ b/src/vm/fastserializer.cpp
@@ -0,0 +1,337 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "fastserializer.h"
+
+#ifdef FEATURE_PERFTRACING
+
+FastSerializer::FastSerializer(SString &outputFilePath, FastSerializableObject &object)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_writeErrorEncountered = false;
+ m_pEntryObject = &object;
+ m_currentPos = 0;
+ m_nextForwardReference = 0;
+ m_pFileStream = new CFileStream();
+ if(FAILED(m_pFileStream->OpenForWrite(outputFilePath)))
+ {
+ delete(m_pFileStream);
+ m_pFileStream = NULL;
+ return;
+ }
+
+ // Write the file header.
+ WriteFileHeader();
+
+ // Write the entry object.
+ WriteEntryObject();
+}
+
+FastSerializer::~FastSerializer()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ // Write the end of the entry object.
+ WriteTag(FastSerializerTags::EndObject);
+
+ // Write forward reference table.
+ StreamLabel forwardReferenceLabel = WriteForwardReferenceTable();
+
+ // Write trailer.
+ WriteTrailer(forwardReferenceLabel);
+
+ if(m_pFileStream != NULL)
+ {
+ delete(m_pFileStream);
+ m_pFileStream = NULL;
+ }
+}
+
+StreamLabel FastSerializer::GetStreamLabel() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (StreamLabel)m_currentPos;
+}
+
+void FastSerializer::WriteObject(FastSerializableObject *pObject)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(pObject != NULL);
+ }
+ CONTRACTL_END;
+
+ // Write a BeginObject tag.
+ WriteTag(FastSerializerTags::BeginObject);
+
+ // Write the object's type information.
+ WriteSerializationType(pObject);
+
+ // Ask the object to serialize itself using the current serializer.
+ pObject->FastSerialize(this);
+
+ // Write object end tag.
+ WriteTag(FastSerializerTags::EndObject);
+}
+
+void FastSerializer::WriteBuffer(BYTE *pBuffer, unsigned int length)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(pBuffer != NULL);
+ PRECONDITION(length > 0);
+ }
+ CONTRACTL_END;
+
+ if(m_writeErrorEncountered || m_pFileStream == NULL)
+ {
+ return;
+ }
+
+ EX_TRY
+ {
+ ULONG outCount;
+ m_pFileStream->Write(pBuffer, length, &outCount);
+
+#ifdef _DEBUG
+ size_t prevPos = m_currentPos;
+#endif
+ m_currentPos += outCount;
+#ifdef _DEBUG
+ _ASSERTE(prevPos < m_currentPos);
+#endif
+
+ if (length != outCount)
+ {
+ // This will cause us to stop writing to the file.
+ // The file will still remain open until shutdown so that we don't have to take a lock at this level when we touch the file stream.
+ m_writeErrorEncountered = true;
+ }
+ }
+ EX_CATCH
+ {
+ m_writeErrorEncountered = true;
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+}
+
+void FastSerializer::WriteEntryObject()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ // Write begin entry object tag.
+ WriteTag(FastSerializerTags::BeginObject);
+
+ // Write the type information for the entry object.
+ WriteSerializationType(m_pEntryObject);
+
+ // The object is now initialized. Fields or other objects can now be written.
+}
+
+unsigned int FastSerializer::AllocateForwardReference()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(m_nextForwardReference < MaxForwardReferences);
+ }
+ CONTRACTL_END;
+
+ // TODO: Handle failure.
+
+ // Save the index.
+ unsigned int index = m_nextForwardReference;
+
+ // Allocate the forward reference and zero-fill it so that the reader
+ // will know if it was not properly defined.
+ m_forwardReferences[m_nextForwardReference++] = 0;
+
+ return index;
+}
+
+void FastSerializer::DefineForwardReference(unsigned int index, StreamLabel value)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(index < MaxForwardReferences);
+ }
+ CONTRACTL_END;
+
+ m_forwardReferences[index] = value;
+}
+
+void FastSerializer::WriteForwardReference(unsigned int index)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(index < MaxForwardReferences);
+ }
+ CONTRACTL_END;
+
+ WriteBuffer((BYTE*)&index, sizeof(index));
+}
+
+void FastSerializer::WriteSerializationType(FastSerializableObject *pObject)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ PRECONDITION(pObject != NULL);
+ }
+ CONTRACTL_END;
+
+ // Write the BeginObject tag.
+ WriteTag(FastSerializerTags::BeginObject);
+
+ // Write a NullReference tag, which indicates that the fields that follow belong to SerializationType.
+ WriteTag(FastSerializerTags::NullReference);
+
+ // Write the SerializationType version fields.
+ int serializationType[2];
+ serializationType[0] = 1; // Object Version.
+ serializationType[1] = 0; // Minimum Reader Version.
+ WriteBuffer((BYTE*) &serializationType, sizeof(serializationType));
+
+ // Write the SerializationType TypeName field.
+ const char *strTypeName = pObject->GetTypeName();
+ unsigned int length = (unsigned int)strlen(strTypeName);
+ WriteString(strTypeName, length);
+
+ // Write the EndObject tag.
+ WriteTag(FastSerializerTags::EndObject);
+}
+
+
+void FastSerializer::WriteTag(FastSerializerTags tag, BYTE *payload, unsigned int payloadLength)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ WriteBuffer((BYTE *)&tag, sizeof(tag));
+ if(payload != NULL)
+ {
+ _ASSERTE(payloadLength > 0);
+ WriteBuffer(payload, payloadLength);
+ }
+}
+
+
+void FastSerializer::WriteFileHeader()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ const char *strSignature = "!FastSerialization.1";
+ unsigned int length = (unsigned int)strlen(strSignature);
+ WriteString(strSignature, length);
+}
+
+void FastSerializer::WriteString(const char *strContents, unsigned int length)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ // Write the string length.
+ WriteBuffer((BYTE*) &length, sizeof(length));
+
+ // Write the string contents.
+ WriteBuffer((BYTE*) strContents, length);
+}
+
+StreamLabel FastSerializer::WriteForwardReferenceTable()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ // Save the position of the start of the forward references table.
+ StreamLabel current = GetStreamLabel();
+
+ // Write the count of allocated references.
+ WriteBuffer((BYTE*) &m_nextForwardReference, sizeof(m_nextForwardReference));
+
+ // Write each of the allocated references.
+ WriteBuffer((BYTE*) m_forwardReferences, sizeof(StreamLabel) * m_nextForwardReference);
+
+ return current;
+}
+
+void FastSerializer::WriteTrailer(StreamLabel forwardReferencesTableStart)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ // Get the current location to mark the beginning of the trailer.
+ StreamLabel current = GetStreamLabel();
+
+ // Write the trailer, which contains the start of the forward references table.
+ WriteBuffer((BYTE*) &forwardReferencesTableStart, sizeof(forwardReferencesTableStart));
+
+ // Write the location of the trailer. This is the final piece of data written to the file,
+ // so that it can be easily found by a reader that can seek to the end of the file.
+ WriteBuffer((BYTE*) &current, sizeof(current));
+}
+
+#endif // FEATURE_PERFTRACING
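
Because WriteTrailer makes the trailer's StreamLabel the last four bytes of the stream, a reader can bootstrap by seeking to the end of the file. A sketch in portable C++ (reader-side code, not part of the runtime; error handling omitted):

    #include <cstdint>
    #include <cstdio>

    // Returns the StreamLabel (file offset) of the trailer, which in turn
    // holds the StreamLabel of the forward reference table.
    uint32_t ReadTrailerLabel(FILE *f)
    {
        uint32_t label = 0;
        fseek(f, -(long)sizeof(label), SEEK_END);
        fread(&label, sizeof(label), 1, f);
        return label;
    }
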
diff --git a/src/vm/fastserializer.h b/src/vm/fastserializer.h
new file mode 100644
index 0000000000..5fd2cfd4a5
--- /dev/null
+++ b/src/vm/fastserializer.h
@@ -0,0 +1,74 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef __FASTSERIALIZER_H__
+#define __FASTSERIALIZER_H__
+
+#ifdef FEATURE_PERFTRACING
+
+#include "fastserializableobject.h"
+#include "fstream.h"
+
+class FastSerializer;
+
+typedef unsigned int StreamLabel;
+
+enum class FastSerializerTags : BYTE
+{
+ Error, // To improve debuggability, 0 is an illegal tag.
+ NullReference, // Tag for a null object reference.
+ ObjectReference, // Followed by a StreamLabel
+ ForwardReference, // Followed by an index (32-bit integer) into the forward reference array and a Type object
+ BeginObject, // Followed by a Type object, object data, and a tagged EndObject
+ BeginPrivateObject, // Like BeginObject, but not placed in the interning table on deserialization
+ EndObject, // Placed after an object to mark its end.
+ ForwardDefinition, // Followed by a forward reference index and an object definition (BeginObject)
+ Byte,
+ Int16,
+ Int32,
+ Int64,
+ SkipRegion,
+ String,
+ Limit, // Just past the last valid tag, used for asserts.
+};
+
+class FastSerializer
+{
+public:
+
+ FastSerializer(SString &outputFilePath, FastSerializableObject &object);
+ ~FastSerializer();
+
+ StreamLabel GetStreamLabel() const;
+
+ void WriteObject(FastSerializableObject *pObject);
+ void WriteBuffer(BYTE *pBuffer, unsigned int length);
+ void WriteTag(FastSerializerTags tag, BYTE *payload = NULL, unsigned int payloadLength = 0);
+ void WriteString(const char *strContents, unsigned int length);
+
+ unsigned int AllocateForwardReference();
+ void DefineForwardReference(unsigned int index, StreamLabel value);
+ void WriteForwardReference(unsigned int index);
+
+private:
+
+ void WriteEntryObject();
+ void WriteSerializationType(FastSerializableObject *pObject);
+ void WriteFileHeader();
+ StreamLabel WriteForwardReferenceTable();
+ void WriteTrailer(StreamLabel forwardReferencesTableStart);
+
+ CFileStream *m_pFileStream;
+ bool m_writeErrorEncountered;
+ FastSerializableObject *m_pEntryObject;
+ size_t m_currentPos;
+
+ static const unsigned int MaxForwardReferences = 100;
+ StreamLabel m_forwardReferences[MaxForwardReferences];
+ unsigned int m_nextForwardReference;
+};
+
+#endif // FEATURE_PERFTRACING
+
+#endif // __FASTSERIALIZER_H__
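
Putting the Write* methods together, the stream layout FastSerializer produces is, as a sketch:

    // offset 0 : length-prefixed "!FastSerialization.1"     (WriteFileHeader)
    // then     : BeginObject tag + SerializationType, then
    //            the entry object's own data                (WriteEntryObject)
    // then     : EndObject tag closing the entry object     (~FastSerializer)
    // then     : forward reference count and StreamLabels   (WriteForwardReferenceTable)
    // then     : StreamLabel of the forward reference table (WriteTrailer)
    // last 4 B : StreamLabel of the trailer, so a reader can
    //            recover everything by seeking to the end
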
diff --git a/src/vm/field.cpp b/src/vm/field.cpp
index 9cdb16f6b9..defccd7269 100644
--- a/src/vm/field.cpp
+++ b/src/vm/field.cpp
@@ -738,7 +738,7 @@ void FieldDesc::SaveContents(DataImage *image)
// image.
//
- if (IsILOnlyRVAField())
+ if (IsRVA())
{
//
// Move the RVA data into the prejit image.
@@ -888,12 +888,15 @@ TypeHandle FieldDesc::GetExactFieldType(TypeHandle owner)
GetSig(&pSig, &cSig);
SigPointer sig(pSig, cSig);
+ ULONG callConv;
+ IfFailThrow(sig.GetCallingConv(&callConv));
+ _ASSERTE(callConv == IMAGE_CEE_CS_CALLCONV_FIELD);
+
// Get the generics information
SigTypeContext sigTypeContext(GetExactClassInstantiation(owner), Instantiation());
- TypeHandle thApproxFieldType = GetApproxFieldTypeHandleThrowing();
// Load the exact type
- RETURN (sig.GetTypeHandleThrowing(thApproxFieldType.GetModule(), &sigTypeContext));
+ RETURN (sig.GetTypeHandleThrowing(GetModule(), &sigTypeContext));
}
}
diff --git a/src/vm/field.h b/src/vm/field.h
index 8e762eb4e4..030a0aad86 100644
--- a/src/vm/field.h
+++ b/src/vm/field.h
@@ -274,12 +274,6 @@ public:
: dwOffset;
}
- BOOL IsILOnlyRVAField()
- {
- WRAPPER_NO_CONTRACT;
- return (IsRVA() && GetModule()->GetFile()->IsILOnly());
- }
-
DWORD IsStatic() const
{
LIMITED_METHOD_DAC_CONTRACT;
diff --git a/src/vm/i386/cgenx86.cpp b/src/vm/i386/cgenx86.cpp
index 14909b93e5..0a276c0ff9 100644
--- a/src/vm/i386/cgenx86.cpp
+++ b/src/vm/i386/cgenx86.cpp
@@ -1302,30 +1302,6 @@ extern "C" VOID STDCALL StubRareDisableTHROWWorker(Thread *pThread)
pThread->HandleThreadAbort();
}
-#ifndef FEATURE_PAL
-// Note that this logic is copied below, in PopSEHRecords
-__declspec(naked)
-VOID __cdecl PopSEHRecords(LPVOID pTargetSP)
-{
- // No CONTRACT possible on naked functions
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
-
- __asm{
- mov ecx, [esp+4] ;; ecx <- pTargetSP
- mov eax, fs:[0] ;; get current SEH record
- poploop:
- cmp eax, ecx
- jge done
- mov eax, [eax] ;; get next SEH record
- jmp poploop
- done:
- mov fs:[0], eax
- retn
- }
-}
-#endif // FEATURE_PAL
-
//////////////////////////////////////////////////////////////////////////////
//
// JITInterface
diff --git a/src/vm/i386/excepcpu.h b/src/vm/i386/excepcpu.h
index d70c6620a1..a97128b9fc 100644
--- a/src/vm/i386/excepcpu.h
+++ b/src/vm/i386/excepcpu.h
@@ -97,10 +97,6 @@ EXTERN_C LPVOID STDCALL COMPlusEndCatch(LPVOID ebp, DWORD ebx, DWORD edi, DWORD
PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext);
#ifdef WIN64EXCEPTIONS
PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispatcherContext);
-
-class FaultingExceptionFrame;
-
-FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame (DISPATCHER_CONTEXT *pDispatcherContext);
#endif // WIN64EXCEPTIONS
// Determine the address of the instruction that made the current call.
diff --git a/src/vm/i386/excepx86.cpp b/src/vm/i386/excepx86.cpp
index 8c65db75a7..cf01147341 100644
--- a/src/vm/i386/excepx86.cpp
+++ b/src/vm/i386/excepx86.cpp
@@ -370,7 +370,6 @@ CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread)
{
WRAPPER_NO_CONTRACT;
-#ifndef FEATURE_PAL
PCODE f_IP = GetIP(pContext);
if (Thread::IsAddrOfRedirectFunc((PVOID)f_IP)) {
@@ -427,9 +426,6 @@ CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread)
SetIP(pContext, GetIP(pThread->m_OSContext) - 1);
STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 4 setting IP = %x\n", pContext->Eip);
}
-#else
- PORTABILITY_ASSERT("CPFH_AdjustContextForThreadSuspensionRace");
-#endif
}
#endif // FEATURE_HIJACK
@@ -1147,7 +1143,6 @@ CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc.
pExInfo->m_pExceptionPointers = &exceptionPointers;
-#ifndef FEATURE_PAL
if (bRethrownException || bNestedException)
{
_ASSERTE(pExInfo->m_pPrevNestedInfo != NULL);
@@ -1156,7 +1151,6 @@ CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc.
SetStateForWatsonBucketing(bRethrownException, pExInfo->GetPreviousExceptionTracker()->GetThrowableAsHandle());
END_SO_INTOLERANT_CODE;
}
-#endif
#ifdef DEBUGGING_SUPPORTED
//
@@ -1971,17 +1965,10 @@ PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext)
}
#if !defined(DACCESS_COMPILE)
-#ifdef FEATURE_PAL
-static PEXCEPTION_REGISTRATION_RECORD CurrentSEHRecord = EXCEPTION_CHAIN_END;
-#endif
-
PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord()
{
WRAPPER_NO_CONTRACT;
-#ifdef FEATURE_PAL
- LPVOID fs0 = CurrentSEHRecord;
-#else // FEATURE_PAL
LPVOID fs0 = (LPVOID)__readfsdword(0);
#if 0 // This walk is too expensive considering we hit it every time we have a CONTRACT(NOTHROW)
@@ -2013,24 +2000,18 @@ PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord()
}
#endif
#endif // 0
-#endif // FEATURE_PAL
return (EXCEPTION_REGISTRATION_RECORD*) fs0;
}
PEXCEPTION_REGISTRATION_RECORD GetFirstCOMPlusSEHRecord(Thread *pThread) {
WRAPPER_NO_CONTRACT;
-#ifndef FEATURE_PAL
EXCEPTION_REGISTRATION_RECORD *pEHR = *(pThread->GetExceptionListPtr());
if (pEHR == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pEHR)) {
return pEHR;
} else {
return GetNextCOMPlusSEHRecord(pEHR);
}
-#else // FEATURE_PAL
- PORTABILITY_ASSERT("GetFirstCOMPlusSEHRecord");
- return NULL;
-#endif // FEATURE_PAL
}
@@ -2056,13 +2037,30 @@ PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD *n
VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH)
{
WRAPPER_NO_CONTRACT;
-#ifndef FEATURE_PAL
*GetThread()->GetExceptionListPtr() = pSEH;
-#else // FEATURE_PAL
- _ASSERTE("NYI");
-#endif // FEATURE_PAL
}
+// Note that this logic is copied below, in PopSEHRecords
+__declspec(naked)
+VOID __cdecl PopSEHRecords(LPVOID pTargetSP)
+{
+ // No CONTRACT possible on naked functions
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ __asm{
+ mov ecx, [esp+4] ;; ecx <- pTargetSP
+ mov eax, fs:[0] ;; get current SEH record
+ poploop:
+ cmp eax, ecx
+ jge done
+ mov eax, [eax] ;; get next SEH record
+ jmp poploop
+ done:
+ mov fs:[0], eax
+ retn
+ }
+}
//
// Unwind pExinfo, pops FS:[0] handlers until the interception context SP, and
@@ -2097,7 +2095,6 @@ BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_SO_TOLERANT;
-#ifndef FEATURE_PAL
PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();
while ((LPVOID)pEHR < pTargetSP)
@@ -2153,10 +2150,6 @@ BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
SetCurrentSEHRecord(pEHR);
}
return FALSE;
-#else // FEATURE_PAL
- PORTABILITY_ASSERT("PopNestedExceptionRecords");
- return FALSE;
-#endif // FEATURE_PAL
}
//
@@ -2261,7 +2254,6 @@ int COMPlusThrowCallbackHelper(IJitManager *pJitManager,
int iFilt = 0;
-#ifndef FEATURE_PAL
EX_TRY
{
GCPROTECT_BEGIN (throwable);
@@ -2290,10 +2282,6 @@ int COMPlusThrowCallbackHelper(IJitManager *pJitManager,
EX_END_CATCH(SwallowAllExceptions)
return iFilt;
-#else // FEATURE_PAL
- PORTABILITY_ASSERT("COMPlusThrowCallbackHelper");
- return EXCEPTION_CONTINUE_SEARCH;
-#endif // FEATURE_PAL
}
//******************************************************************************
diff --git a/src/vm/i386/gmsx86.cpp b/src/vm/i386/gmsx86.cpp
index 2852b924ab..64ac4a38ba 100644
--- a/src/vm/i386/gmsx86.cpp
+++ b/src/vm/i386/gmsx86.cpp
@@ -894,6 +894,8 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState,
case 0x01: // ADD mod/rm
case 0x03:
+ case 0x11: // ADC mod/rm
+ case 0x13:
case 0x29: // SUB mod/rm
case 0x2B:
datasize = 0;
diff --git a/src/vm/i386/unixstubs.cpp b/src/vm/i386/unixstubs.cpp
index 8441b0794e..a9b1094b03 100644
--- a/src/vm/i386/unixstubs.cpp
+++ b/src/vm/i386/unixstubs.cpp
@@ -56,9 +56,3 @@ PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(T_DISPATCHER_CONTEXT * pDispa
PORTABILITY_ASSERT("GetCONTEXTFromRedirectedStubStackFrame");
return NULL;
}
-
-FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame(DISPATCHER_CONTEXT *pDispatcherContext)
-{
- PORTABILITY_ASSERT("GetFrameFromRedirectedStubStackFrame");
- return NULL;
-}
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index 5ef7700896..f7617ad306 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -360,7 +360,7 @@ CorInfoType CEEInfo::asCorInfoType(CorElementType eeType,
_ASSERTE((CorInfoType) map[ELEMENT_TYPE_PTR] == CORINFO_TYPE_PTR);
_ASSERTE((CorInfoType) map[ELEMENT_TYPE_TYPEDBYREF] == CORINFO_TYPE_REFANY);
- CorInfoType res = (eeType < ELEMENT_TYPE_MAX) ? ((CorInfoType) map[eeType]) : CORINFO_TYPE_UNDEF;
+ CorInfoType res = ((unsigned)eeType < ELEMENT_TYPE_MAX) ? ((CorInfoType) map[(unsigned)eeType]) : CORINFO_TYPE_UNDEF;
if (clsRet)
*clsRet = CORINFO_CLASS_HANDLE(typeHndUpdated.AsPtr());
@@ -1439,11 +1439,6 @@ static CORINFO_FIELD_ACCESSOR getFieldIntrinsic(FieldDesc * field)
{
return CORINFO_FIELD_INTRINSIC_ZERO;
}
- else
- if (MscorlibBinder::GetField(FIELD__BITCONVERTER__ISLITTLEENDIAN) == field)
- {
- return CORINFO_FIELD_INTRINSIC_ISLITTLEENDIAN;
- }
return (CORINFO_FIELD_ACCESSOR)-1;
}
@@ -12614,13 +12609,6 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to load JIT compiler"));
#endif // ALLOW_SXS_JIT
}
-
- // If no compatjit wasn't used, but the user (normally a test case) requires that one is used, then fail.
- // This is analogous to ZapRequire.
- if (!jitMgr->m_fLegacyJitUsed && (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_RequireLegacyJit) == 1))
- {
- EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to use legacy JIT compiler with RequireLegacyJit set"));
- }
#endif // CROSSGEN_COMPILE
#ifdef _DEBUG
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index 77a6a0d37f..34ae6d9489 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -4571,6 +4571,35 @@ c_CentralJumpCode = {
};
#include <poppack.h>
+#elif defined(_TARGET_ARM_)
+
+#include <pshpack1.h>
+struct CentralJumpCode {
+ BYTE m_ldrPC[4];
+ BYTE m_short[2];
+ MethodDescChunk *m_pChunk;
+ PCODE m_target;
+
+ inline void Setup(PCODE target, MethodDescChunk *pChunk) {
+ WRAPPER_NO_CONTRACT;
+
+ m_target = target;
+ m_pChunk = pChunk;
+ }
+
+ inline BOOL CheckTarget(TADDR target) {
+ WRAPPER_NO_CONTRACT;
+ return ((TADDR)m_target == target);
+ }
+}
+c_CentralJumpCode = {
+ { 0xDF, 0xF8, 0x08, 0xF0 }, // ldr pc, =pTarget
+ { 0x00, 0x00 }, // short offset for alignment
+ 0, // pChunk
+ 0 // pTarget
+};
+#include <poppack.h>
+
#else
#error Unsupported platform
#endif
@@ -4580,10 +4609,92 @@ typedef DPTR(struct CentralJumpCode) PTR_CentralJumpCode;
static_assert_no_msg((TEP_CENTRAL_JUMP_SIZE & 1) == 0);
#define TEP_ENTRY_SIZE 4
+
+#ifdef _TARGET_ARM_
+
+#define TEP_HALF_ENTRY_SIZE (TEP_ENTRY_SIZE / 2)
+
+// A compact entry point on ARM consists of two Thumb instructions:
+// mov r12, pc
+// b CentralJumpCode
+
+// First instruction 0x46fc
+#define TEP_ENTRY_INSTR1_BYTE1 0xFC
+#define TEP_ENTRY_INSTR1_BYTE2 0x46
+
+// Mask for unconditional branch opcode
+#define TEP_ENTRY_INSTR2_MASK1 0xE0
+
+// Mask for opcode
+#define TEP_ENTRY_INSTR2_MASK2 0xF8
+
+// Bit used for ARM to identify compact entry points
+#define COMPACT_ENTRY_ARM_CODE 0x2
+
+/* static */ int MethodDescChunk::GetCompactEntryPointMaxCount ()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ return MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB / TEP_ENTRY_SIZE;
+}
+
+// Get the offset from the start of the current compact entry point to the CentralJumpCode.
+static uint16_t DecodeOffsetFromBranchToCentralJump (uint16_t instr)
+{
+ int16_t offset = decodeUnconditionalBranchThumb ((LPBYTE) &instr);
+
+ offset += PC_REG_RELATIVE_OFFSET + TEP_HALF_ENTRY_SIZE;
+
+ _ASSERTE (offset >= TEP_ENTRY_SIZE && (offset % TEP_ENTRY_SIZE == 0));
+
+ return (uint16_t) offset;
+}
+
+#ifndef DACCESS_COMPILE
+
+// Encode the branch instruction from the current compact entry point to the central jump.
+static uint16_t EncodeBranchToCentralJump (int16_t offset)
+{
+ _ASSERTE (offset >= 0 && (offset % TEP_ENTRY_SIZE == 0));
+
+ offset += TEP_HALF_ENTRY_SIZE - PC_REG_RELATIVE_OFFSET;
+
+ uint16_t instr;
+ emitUnconditionalBranchThumb ((LPBYTE) &instr, offset);
+
+ return instr;
+}
+
+#endif // DACCESS_COMPILE
+
+#else // _TARGET_ARM_
+
#define TEP_MAX_BEFORE_INDEX (1 + (127 / TEP_ENTRY_SIZE))
#define TEP_MAX_BLOCK_INDEX (TEP_MAX_BEFORE_INDEX + (128 - TEP_CENTRAL_JUMP_SIZE) / TEP_ENTRY_SIZE)
#define TEP_FULL_BLOCK_SIZE (TEP_MAX_BLOCK_INDEX * TEP_ENTRY_SIZE + TEP_CENTRAL_JUMP_SIZE)
+#endif // _TARGET_ARM_
+
+BOOL MethodDescChunk::IsCompactEntryPointAtAddress(PCODE addr)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
+
+ // Compact entrypoints start at odd addresses
+ return (addr & 1) != 0;
+
+#elif defined(_TARGET_ARM_)
+
+ // Compact entry points start at odd (Thumb) addresses with the second bit set to 1
+ uint8_t compactEntryPointMask = THUMB_CODE | COMPACT_ENTRY_ARM_CODE;
+ return (addr & compactEntryPointMask) == compactEntryPointMask;
+
+#else
+ #error Unsupported platform
+#endif
+}
+
//*******************************************************************************
/* static */ MethodDesc* MethodDescChunk::GetMethodDescFromCompactEntryPoint(PCODE addr, BOOL fSpeculative /*=FALSE*/)
{
@@ -4597,18 +4708,39 @@ static_assert_no_msg((TEP_CENTRAL_JUMP_SIZE & 1) == 0);
// Always do consistency check in debug
if (fSpeculative INDEBUG(|| TRUE))
{
+#ifdef _TARGET_ARM_
+ if (!IsCompactEntryPointAtAddress(addr))
+#else // _TARGET_ARM_
if ((addr & 3) != 1 ||
*PTR_BYTE(addr) != X86_INSTR_MOV_AL ||
*PTR_BYTE(addr+2) != X86_INSTR_JMP_REL8)
+#endif // _TARGET_ARM_
{
if (fSpeculative) return NULL;
_ASSERTE(!"Unexpected code in temporary entrypoint");
}
}
+#ifdef _TARGET_ARM_
+
+ // On ARM, compact entry points are Thumb code
+ _ASSERTE ((addr & THUMB_CODE) != 0);
+ addr = addr - THUMB_CODE;
+
+ // Get the offset to the CentralJumpCode from the current compact entry point
+ PTR_UINT16 pBranchInstr = (PTR_UINT16(addr)) + 1;
+ uint16_t offset = DecodeOffsetFromBranchToCentralJump (*pBranchInstr);
+
+ TADDR centralJump = addr + offset;
+ int index = (centralJump - addr - TEP_ENTRY_SIZE) / TEP_ENTRY_SIZE;
+
+#else // _TARGET_ARM_
+
int index = *PTR_BYTE(addr+1);
TADDR centralJump = addr + 4 + *PTR_SBYTE(addr+3);
+#endif // _TARGET_ARM_
+
CentralJumpCode* pCentralJumpCode = PTR_CentralJumpCode(centralJump);
// Always do consistency check in debug
@@ -4625,10 +4757,42 @@ static_assert_no_msg((TEP_CENTRAL_JUMP_SIZE & 1) == 0);
}
}
+#ifdef _TARGET_ARM_
+
+ _ASSERTE_IMPL(pCentralJumpCode->CheckTarget(GetPreStubCompactARMEntryPoint()));
+
+#else // _TARGET_ARM_
+
_ASSERTE_IMPL(pCentralJumpCode->CheckTarget(GetPreStubEntryPoint()));
+
+#endif // _TARGET_ARM_
}
+#ifdef _TARGET_ARM_
+ // Go through all MethodDescs in the MethodDescChunk and find the one with the required index
+ PTR_MethodDescChunk pChunk = *((DPTR(PTR_MethodDescChunk))(centralJump + offsetof(CentralJumpCode, m_pChunk)));
+ TADDR pMD = PTR_HOST_TO_TADDR (pChunk->GetFirstMethodDesc ());
+
+ _ASSERTE (index >= 0 && index < ((int) pChunk->GetCount ()));
+
+ index = ((int) pChunk->GetCount ()) - 1 - index;
+
+ SIZE_T totalSize = 0;
+ int curIndex = 0;
+
+ while (index != curIndex)
+ {
+ SIZE_T sizeCur = (PTR_MethodDesc (pMD))->SizeOf ();
+ totalSize += sizeCur;
+
+ pMD += sizeCur;
+ ++curIndex;
+ }
+
+ return PTR_MethodDesc (pMD);
+#else // _TARGET_ARM_
return PTR_MethodDesc((TADDR)pCentralJumpCode->m_pBaseMD + index * MethodDesc::ALIGNMENT);
+#endif // _TARGET_ARM_
}
//*******************************************************************************
@@ -4636,11 +4800,19 @@ SIZE_T MethodDescChunk::SizeOfCompactEntryPoints(int count)
{
LIMITED_METHOD_DAC_CONTRACT;
+#ifdef _TARGET_ARM_
+
+ return COMPACT_ENTRY_ARM_CODE + count * TEP_ENTRY_SIZE + TEP_CENTRAL_JUMP_SIZE;
+
+#else // _TARGET_ARM_
+
int fullBlocks = count / TEP_MAX_BLOCK_INDEX;
int remainder = count % TEP_MAX_BLOCK_INDEX;
return 1 + (fullBlocks * TEP_FULL_BLOCK_SIZE) +
(remainder * TEP_ENTRY_SIZE) + ((remainder != 0) ? TEP_CENTRAL_JUMP_SIZE : 0);
+
+#endif // _TARGET_ARM_
}
#ifndef DACCESS_COMPILE
@@ -4657,16 +4829,37 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca
TADDR temporaryEntryPoints = (TADDR)pamTracker->Track(pLoaderAllocator->GetPrecodeHeap()->AllocAlignedMem(size, sizeof(TADDR)));
+#ifdef _TARGET_ARM_
+ BYTE* p = (BYTE*)temporaryEntryPoints + COMPACT_ENTRY_ARM_CODE;
+ int relOffset = count * TEP_ENTRY_SIZE - TEP_ENTRY_SIZE; // relative offset for the short jump
+
+ _ASSERTE (relOffset < MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB);
+#else // _TARGET_ARM_
// make the temporary entrypoints unaligned, so they are easy to identify
BYTE* p = (BYTE*)temporaryEntryPoints + 1;
+ int indexInBlock = TEP_MAX_BLOCK_INDEX; // recompute relOffset in first iteration
+ int relOffset = 0; // relative offset for the short jump
+#endif // _TARGET_ARM_
- int indexInBlock = TEP_MAX_BLOCK_INDEX; // recompute relOffset in first iteration
- int relOffset = 0; // relative offset for the short jump
MethodDesc * pBaseMD = 0; // index of the start of the block
MethodDesc * pMD = GetFirstMethodDesc();
for (int index = 0; index < count; index++)
{
+#ifdef _TARGET_ARM_
+
+ uint8_t *pMovInstrByte1 = (uint8_t *)p;
+ uint8_t *pMovInstrByte2 = (uint8_t *)p+1;
+ uint16_t *pBranchInstr = ((uint16_t *)p)+1;
+
+ *pMovInstrByte1 = TEP_ENTRY_INSTR1_BYTE1;
+ *pMovInstrByte2 = TEP_ENTRY_INSTR1_BYTE2;
+ *pBranchInstr = EncodeBranchToCentralJump ((int16_t) relOffset);
+
+ p += TEP_ENTRY_SIZE;
+
+#else // _TARGET_ARM_
+
if (indexInBlock == TEP_MAX_BLOCK_INDEX)
{
relOffset = (min(count - index, TEP_MAX_BEFORE_INDEX) - 1) * TEP_ENTRY_SIZE;
@@ -4698,14 +4891,28 @@ TADDR MethodDescChunk::AllocateCompactEntryPoints(LoaderAllocator *pLoaderAlloca
relOffset -= TEP_CENTRAL_JUMP_SIZE;
}
- relOffset -= TEP_ENTRY_SIZE;
indexInBlock++;
+#endif // _TARGET_ARM_
+
+ relOffset -= TEP_ENTRY_SIZE;
pMD = (MethodDesc *)((BYTE *)pMD + pMD->SizeOf());
}
+#ifdef _TARGET_ARM_
+
+ CentralJumpCode* pCode = (CentralJumpCode*)p;
+ memcpy(pCode, &c_CentralJumpCode, TEP_CENTRAL_JUMP_SIZE);
+ pCode->Setup (GetPreStubCompactARMEntryPoint(), this);
+
+ _ASSERTE(p + TEP_CENTRAL_JUMP_SIZE == (BYTE*)temporaryEntryPoints + size);
+
+#else // _TARGET_ARM_
+
_ASSERTE(p == (BYTE*)temporaryEntryPoints + size);
+#endif // _TARGET_ARM_
+
ClrFlushInstructionCache((LPVOID)temporaryEntryPoints, size);
SetHasCompactEntryPoints();
@@ -4725,11 +4932,19 @@ PCODE MethodDescChunk::GetTemporaryEntryPoint(int index)
#ifdef HAS_COMPACT_ENTRYPOINTS
if (HasCompactEntryPoints())
{
+#ifdef _TARGET_ARM_
+
+ return GetTemporaryEntryPoints() + COMPACT_ENTRY_ARM_CODE + THUMB_CODE + index * TEP_ENTRY_SIZE;
+
+#else // _TARGET_ARM_
+
int fullBlocks = index / TEP_MAX_BLOCK_INDEX;
int remainder = index % TEP_MAX_BLOCK_INDEX;
return GetTemporaryEntryPoints() + 1 + (fullBlocks * TEP_FULL_BLOCK_SIZE) +
(remainder * TEP_ENTRY_SIZE) + ((remainder >= TEP_MAX_BEFORE_INDEX) ? TEP_CENTRAL_JUMP_SIZE : 0);
+
+#endif // _TARGET_ARM_
}
#endif // HAS_COMPACT_ENTRYPOINTS
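
On ARM the IsCompactEntryPointAtAddress test added above reduces to a two-bit mask check. A sketch with an illustrative address, assuming THUMB_CODE is 0x1 as elsewhere in the ARM port:

    // A compact entry point address carries THUMB_CODE (bit 0) and
    // COMPACT_ENTRY_ARM_CODE (bit 1), so its low two bits are 0b11.
    const TADDR mask = 0x1 /* THUMB_CODE */ | 0x2 /* COMPACT_ENTRY_ARM_CODE */;
    PCODE addr = 0x20001003;                  // illustrative value
    bool isCompact = ((addr & mask) == mask); // true for this address
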
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index 9545da2248..3354e5799a 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -2031,23 +2031,18 @@ public:
// direct call to direct jump.
//
// We use (1) for x86 and (2) for 64-bit to get the best performance on each platform.
- //
+ // For ARM, (1) is used.
TADDR AllocateCompactEntryPoints(LoaderAllocator *pLoaderAllocator, AllocMemTracker *pamTracker);
static MethodDesc* GetMethodDescFromCompactEntryPoint(PCODE addr, BOOL fSpeculative = FALSE);
static SIZE_T SizeOfCompactEntryPoints(int count);
- static BOOL IsCompactEntryPointAtAddress(PCODE addr)
- {
-#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
- // Compact entrypoints start at odd addresses
- LIMITED_METHOD_DAC_CONTRACT;
- return (addr & 1) != 0;
-#else
- #error Unsupported platform
-#endif
- }
+ static BOOL IsCompactEntryPointAtAddress(PCODE addr);
+
+#ifdef _TARGET_ARM_
+ static int GetCompactEntryPointMaxCount ();
+#endif // _TARGET_ARM_
#endif // HAS_COMPACT_ENTRYPOINTS
FORCEINLINE PTR_MethodTable GetMethodTable()
diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
index df60fca09d..2ce9f2a883 100644
--- a/src/vm/methodtable.h
+++ b/src/vm/methodtable.h
@@ -663,7 +663,7 @@ SystemVClassificationType CorInfoType2UnixAmd64Classification(CorElementType eeT
_ASSERTE((SystemVClassificationType)toSystemVAmd64ClassificationTypeMap[ELEMENT_TYPE_TYPEDBYREF] == SystemVClassificationTypeTypedReference);
_ASSERTE((SystemVClassificationType)toSystemVAmd64ClassificationTypeMap[ELEMENT_TYPE_BYREF] == SystemVClassificationTypeIntegerByRef);
- return (((int)eeType) < ELEMENT_TYPE_MAX) ? (toSystemVAmd64ClassificationTypeMap[eeType]) : SystemVClassificationTypeUnknown;
+ return (((unsigned)eeType) < ELEMENT_TYPE_MAX) ? (toSystemVAmd64ClassificationTypeMap[(unsigned)eeType]) : SystemVClassificationTypeUnknown;
};
#define SYSTEMV_EIGHT_BYTE_SIZE_IN_BYTES 8 // Size of an eightbyte in bytes.
diff --git a/src/vm/mscorlib.cpp b/src/vm/mscorlib.cpp
index 5deaaefa90..3e2d478bbf 100644
--- a/src/vm/mscorlib.cpp
+++ b/src/vm/mscorlib.cpp
@@ -72,8 +72,6 @@
#include "mdaassistants.h"
#endif
-#include "coverage.h"
-
#ifdef FEATURE_COMINTEROP
#include "variant.h"
#include "oavariant.h"
@@ -95,6 +93,7 @@
#if defined(FEATURE_EVENTSOURCE_XPLAT)
#include "nativeeventsource.h"
+#include "eventpipe.h"
#endif //defined(FEATURE_EVENTSOURCE_XPLAT)
#endif // CROSSGEN_MSCORLIB
diff --git a/src/vm/mscorlib.h b/src/vm/mscorlib.h
index 87927f687d..338ba1efda 100644
--- a/src/vm/mscorlib.h
+++ b/src/vm/mscorlib.h
@@ -146,12 +146,6 @@ DEFINE_CLASS(ARRAY, System, Array)
DEFINE_PROPERTY(ARRAY, LENGTH, Length, Int)
DEFINE_METHOD(ARRAY, GET_DATA_PTR_OFFSET_INTERNAL, GetDataPtrOffsetInternal, IM_RetInt)
-#ifdef FEATURE_NONGENERIC_COLLECTIONS
-DEFINE_CLASS(ARRAY_LIST, Collections, ArrayList)
-DEFINE_METHOD(ARRAY_LIST, CTOR, .ctor, IM_RetVoid)
-DEFINE_METHOD(ARRAY_LIST, ADD, Add, IM_Obj_RetInt)
-#endif // FEATURE_NONGENERIC_COLLECTIONS
-
DEFINE_CLASS(ARRAY_WITH_OFFSET, Interop, ArrayWithOffset)
DEFINE_FIELD(ARRAY_WITH_OFFSET, M_ARRAY, m_array)
DEFINE_FIELD(ARRAY_WITH_OFFSET, M_OFFSET, m_offset)
diff --git a/src/vm/pefile.cpp b/src/vm/pefile.cpp
index 16c66b516d..c7870e6366 100644
--- a/src/vm/pefile.cpp
+++ b/src/vm/pefile.cpp
@@ -2849,12 +2849,22 @@ PTR_ICLRPrivBinder PEFile::GetBindingContext()
PTR_ICLRPrivBinder pBindingContext = NULL;
- // Mscorlib is always bound in context of the TPA Binder. However, since it gets loaded and published
- // during EEStartup *before* TPAbinder is initialized, we dont have a binding context to publish against.
+ // CoreLibrary is always bound in the context of the TPA Binder. However, since it gets loaded and published
+ // during EEStartup *before* the DefaultContext Binder (aka TPA Binder) is initialized, we don't have a binding context to publish against.
// Thus, we will always return NULL for its binding context.
if (!IsSystem())
{
pBindingContext = dac_cast<PTR_ICLRPrivBinder>(GetHostAssembly());
+ if (!pBindingContext)
+ {
+ // If we do not have any binding context, check if we are dealing with
+ // a dynamically emitted assembly and if so, use its fallback load context
+ // binder reference.
+ if (IsDynamic())
+ {
+ pBindingContext = GetFallbackLoadContextBinder();
+ }
+ }
}
return pBindingContext;
diff --git a/src/vm/pefile.h b/src/vm/pefile.h
index b7c7bd3e7c..2856083123 100644
--- a/src/vm/pefile.h
+++ b/src/vm/pefile.h
@@ -631,7 +631,7 @@ protected:
// To enable this, we maintain a concept of "Fallback LoadContext", which will be set to the Binder of the
// assembly that created the dynamic assembly. If the creator assembly is dynamic itself, then its fallback
// load context would be propagated to the assembly being dynamically generated.
- ICLRPrivBinder *m_pFallbackLoadContextBinder;
+ PTR_ICLRPrivBinder m_pFallbackLoadContextBinder;
protected:
@@ -657,13 +657,13 @@ public:
bool CanUseWithBindingCache()
{ LIMITED_METHOD_CONTRACT; return !HasHostAssembly(); }
- void SetFallbackLoadContextBinder(ICLRPrivBinder *pFallbackLoadContextBinder)
+ void SetFallbackLoadContextBinder(PTR_ICLRPrivBinder pFallbackLoadContextBinder)
{
LIMITED_METHOD_CONTRACT;
m_pFallbackLoadContextBinder = pFallbackLoadContextBinder;
}
- ICLRPrivBinder *GetFallbackLoadContextBinder()
+ PTR_ICLRPrivBinder GetFallbackLoadContextBinder()
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/peimage.cpp b/src/vm/peimage.cpp
index 39b71ff62f..3367ef93c4 100644
--- a/src/vm/peimage.cpp
+++ b/src/vm/peimage.cpp
@@ -1189,7 +1189,13 @@ HANDLE PEImage::GetFileHandle()
}
if (m_hFile == INVALID_HANDLE_VALUE)
+ {
+#if !defined(DACCESS_COMPILE)
+ EEFileLoadException::Throw(m_path, HRESULT_FROM_WIN32(GetLastError()));
+#else // defined(DACCESS_COMPILE)
ThrowLastError();
+#endif // !defined(DACCESS_COMPILE)
+ }
return m_hFile;
}
diff --git a/src/vm/peimagelayout.cpp b/src/vm/peimagelayout.cpp
index 24166817bb..34ba4d8215 100644
--- a/src/vm/peimagelayout.cpp
+++ b/src/vm/peimagelayout.cpp
@@ -392,10 +392,21 @@ MappedImageLayout::MappedImageLayout(HANDLE hFile, PEImage* pOwner)
{
#ifndef CROSSGEN_COMPILE
+ // Capture last error as it may get reset below.
+
+ DWORD dwLastError = GetLastError();
// There is no reflection-only load on CoreCLR and so we can always throw an error here.
// It is important on Windows Phone. All assemblies that we load must have SEC_IMAGE set
// so that the OS can perform signature verification.
- ThrowLastError();
+ if (pOwner->IsFile())
+ {
+ EEFileLoadException::Throw(pOwner->GetPathForErrorMessages(), HRESULT_FROM_WIN32(dwLastError));
+ }
+ else
+ {
+ // Throw generic exception.
+ ThrowWin32(dwLastError);
+ }
#endif // CROSSGEN_COMPILE
diff --git a/src/vm/precode.cpp b/src/vm/precode.cpp
index 9707b2756b..1daf6e32b8 100644
--- a/src/vm/precode.cpp
+++ b/src/vm/precode.cpp
@@ -525,6 +525,16 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
// Note that these are just best guesses to save memory. If we guessed wrong,
// we will allocate a new exact type of precode in GetOrCreatePrecode.
BOOL fForcedPrecode = pFirstMD->RequiresStableEntryPoint(count > 1);
+
+#ifdef _TARGET_ARM_
+ if (pFirstMD->RequiresMethodDescCallingConvention(count > 1)
+ || count >= MethodDescChunk::GetCompactEntryPointMaxCount ())
+ {
+ // We do not pass the MethodDesc in the scratch register
+ fForcedPrecode = TRUE;
+ }
+#endif // _TARGET_ARM_
+
if (!fForcedPrecode && (totalSize > MethodDescChunk::SizeOfCompactEntryPoints(count)))
return NULL;
#endif
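
The count limit in this check comes from the reach of the Thumb unconditional branch that each compact entry point uses. Assuming MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB is 2048 (the 2 KB forward reach of the 16-bit encoding), the arithmetic is:

    // GetCompactEntryPointMaxCount() = MAX_OFFSET_UNCONDITIONAL_BRANCH_THUMB / TEP_ENTRY_SIZE
    //                                = 2048 / 4
    //                                = 512 compact entry points per chunk
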
diff --git a/src/vm/precode.h b/src/vm/precode.h
index 7dd4cd22f0..8947192482 100644
--- a/src/vm/precode.h
+++ b/src/vm/precode.h
@@ -170,6 +170,11 @@ public:
align = 8;
#endif // _TARGET_X86_ && HAS_FIXUP_PRECODE
+#if defined(_TARGET_ARM_) && defined(HAS_COMPACT_ENTRYPOINTS)
+ // Precodes have to be aligned to allow the fast compact entry point check
+ _ASSERTE (align >= sizeof(void*));
+#endif // _TARGET_ARM_ && HAS_COMPACT_ENTRYPOINTS
+
return align;
}
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 67639e99b2..fccec51bb3 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -55,6 +55,13 @@
#ifndef DACCESS_COMPILE
EXTERN_C void STDCALL ThePreStub();
+
+#if defined(HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_)
+
+EXTERN_C void STDCALL ThePreStubCompactARM();
+
+#endif // defined(HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_)
+
EXTERN_C void STDCALL ThePreStubPatch();
//==========================================================================
@@ -1002,6 +1009,21 @@ Stub * MakeInstantiatingStubWorker(MethodDesc *pMD)
}
#endif // defined(FEATURE_SHARE_GENERIC_CODE)
+#if defined (HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_)
+
+extern "C" MethodDesc * STDCALL PreStubGetMethodDescForCompactEntryPoint (PCODE pCode)
+{
+ _ASSERTE (pCode >= PC_REG_RELATIVE_OFFSET);
+
+ pCode = (PCODE) (pCode - PC_REG_RELATIVE_OFFSET + THUMB_CODE);
+
+ _ASSERTE (MethodDescChunk::IsCompactEntryPointAtAddress (pCode));
+
+ return MethodDescChunk::GetMethodDescFromCompactEntryPoint(pCode, FALSE);
+}
+
+#endif // defined (HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_)
+
//=============================================================================
// This function generates the real code for a method and installs it into
// the methoddesc. Usually ***BUT NOT ALWAYS***, this function runs only once
diff --git a/src/vm/runtimehandles.cpp b/src/vm/runtimehandles.cpp
index 4ff6512a52..7e08dadc10 100644
--- a/src/vm/runtimehandles.cpp
+++ b/src/vm/runtimehandles.cpp
@@ -2615,6 +2615,10 @@ void QCALLTYPE RuntimeMethodHandle::Destroy(MethodDesc * pMethod)
// Fire Unload Dynamic Method Event here
ETW::MethodLog::DynamicMethodDestroyed(pMethod);
+ BEGIN_PIN_PROFILER(CORProfilerIsMonitoringDynamicFunctionUnloads());
+ g_profControlBlock.pProfInterface->DynamicMethodUnloaded((FunctionID)pMethod);
+ END_PIN_PROFILER();
+
pDynamicMethodDesc->Destroy();
END_QCALL;
diff --git a/src/vm/sampleprofiler.cpp b/src/vm/sampleprofiler.cpp
index 004b3c68b0..e4721577ae 100644
--- a/src/vm/sampleprofiler.cpp
+++ b/src/vm/sampleprofiler.cpp
@@ -3,18 +3,24 @@
// See the LICENSE file in the project root for more information.
#include "common.h"
+#include "eventpipebuffermanager.h"
+#include "eventpipeeventinstance.h"
#include "sampleprofiler.h"
#include "hosting.h"
#include "threadsuspend.h"
+#ifdef FEATURE_PERFTRACING
+
Volatile<BOOL> SampleProfiler::s_profilingEnabled = false;
Thread* SampleProfiler::s_pSamplingThread = NULL;
+const GUID SampleProfiler::s_providerID = {0x3c530d44,0x97ae,0x513a,{0x1e,0x6d,0x78,0x3e,0x8f,0x8e,0x03,0xa9}}; // {3c530d44-97ae-513a-1e6d-783e8f8e03a9}
+EventPipeProvider* SampleProfiler::s_pEventPipeProvider = NULL;
+EventPipeEvent* SampleProfiler::s_pThreadTimeEvent = NULL;
+BYTE* SampleProfiler::s_pPayloadExternal = NULL;
+BYTE* SampleProfiler::s_pPayloadManaged = NULL;
CLREventStatic SampleProfiler::s_threadShutdownEvent;
-#ifdef FEATURE_PAL
long SampleProfiler::s_samplingRateInNs = 1000000; // 1ms
-#endif
-// Synchronization of multiple callers occurs in EventPipe::Enable.
void SampleProfiler::Enable()
{
CONTRACTL
@@ -23,9 +29,31 @@ void SampleProfiler::Enable()
GC_TRIGGERS;
MODE_ANY;
PRECONDITION(s_pSamplingThread == NULL);
+ // Synchronization of multiple callers occurs in EventPipe::Enable.
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
}
CONTRACTL_END;
+ if(s_pEventPipeProvider == NULL)
+ {
+ s_pEventPipeProvider = EventPipe::CreateProvider(s_providerID);
+ s_pThreadTimeEvent = s_pEventPipeProvider->AddEvent(
+ 0, /* eventID */
+ 0, /* keywords */
+ 0, /* eventVersion */
+ EventPipeEventLevel::Informational,
+ false /* NeedStack */);
+ }
+
+ if(s_pPayloadExternal == NULL)
+ {
+ s_pPayloadExternal = new BYTE[sizeof(unsigned int)];
+ *((unsigned int *)s_pPayloadExternal) = static_cast<unsigned int>(SampleProfilerSampleType::External);
+
+ s_pPayloadManaged = new BYTE[sizeof(unsigned int)];
+ *((unsigned int *)s_pPayloadManaged) = static_cast<unsigned int>(SampleProfilerSampleType::Managed);
+ }
+
s_profilingEnabled = true;
s_pSamplingThread = SetupUnstartedThread();
if(s_pSamplingThread->CreateNewThread(0, ThreadProc, NULL))
@@ -40,7 +68,6 @@ void SampleProfiler::Enable()
}
}
-// Synchronization of multiple callers occurs in EventPipe::Disable.
void SampleProfiler::Disable()
{
CONTRACTL
@@ -48,6 +75,8 @@ void SampleProfiler::Disable()
THROWS;
GC_TRIGGERS;
MODE_ANY;
+ // Synchronization of multiple callers occurs in EventPipe::Disable.
+ PRECONDITION(EventPipe::GetLock()->OwnedByCurrentThread());
}
CONTRACTL_END;
@@ -68,6 +97,12 @@ void SampleProfiler::Disable()
s_threadShutdownEvent.Wait(0, FALSE /* bAlertable */);
}
+void SampleProfiler::SetSamplingRate(long nanoseconds)
+{
+ LIMITED_METHOD_CONTRACT;
+ s_samplingRateInNs = nanoseconds;
+}
+
DWORD WINAPI SampleProfiler::ThreadProc(void *args)
{
CONTRACTL
@@ -91,11 +126,7 @@ DWORD WINAPI SampleProfiler::ThreadProc(void *args)
if(ThreadSuspend::SysIsSuspendInProgress() || (ThreadSuspend::GetSuspensionThread() != 0))
{
// Skip the current sample.
-#ifdef FEATURE_PAL
PAL_nanosleep(s_samplingRateInNs);
-#else
- ClrSleepEx(1, FALSE);
-#endif
continue;
}
@@ -109,15 +140,11 @@ DWORD WINAPI SampleProfiler::ThreadProc(void *args)
ThreadSuspend::RestartEE(FALSE /* bFinishedGC */, TRUE /* SuspendSucceeded */);
// Wait until it's time to sample again.
-#ifdef FEATURE_PAL
PAL_nanosleep(s_samplingRateInNs);
-#else
- ClrSleepEx(1, FALSE);
-#endif
}
}
- // Destroy the sampling thread when done running.
+ // Destroy the sampling thread when it is done running.
DestroyThread(s_pSamplingThread);
s_pSamplingThread = NULL;
@@ -139,17 +166,33 @@ void SampleProfiler::WalkManagedThreads()
}
CONTRACTL_END;
- Thread *pThread = NULL;
- StackContents stackContents;
+ Thread *pTargetThread = NULL;
// Iterate over all managed threads.
// Assumes that the ThreadStoreLock is held because we've suspended all threads.
- while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ while ((pTargetThread = ThreadStore::GetThreadList(pTargetThread)) != NULL)
{
+ StackContents stackContents;
+
// Walk the stack and write it out as an event.
- if(EventPipe::WalkManagedStackForThread(pThread, stackContents) && !stackContents.IsEmpty())
+ if(EventPipe::WalkManagedStackForThread(pTargetThread, stackContents) && !stackContents.IsEmpty())
{
- EventPipe::WriteSampleProfileEvent(pThread, stackContents);
+ // Set the payload. If the GC mode on suspension > 0, then the thread was in cooperative mode.
+ // Even though there are some cases where this is not managed code, we assume it is managed code here.
+ // If the GC mode on suspension == 0, then the thread was in preemptive mode, which we classify as external here.
+ BYTE *pPayload = s_pPayloadExternal;
+ if(pTargetThread->GetGCModeOnSuspension())
+ {
+ pPayload = s_pPayloadManaged;
+ }
+
+ // Write the sample.
+ EventPipe::WriteSampleProfileEvent(s_pSamplingThread, s_pThreadTimeEvent, pTargetThread, stackContents, pPayload, c_payloadSize);
}
+
+ // Reset the GC mode.
+ pTargetThread->ClearGCModeOnSuspension();
}
}
+
+#endif // FEATURE_PERFTRACING
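
The payload selection in WalkManagedThreads reduces to this rule (a sketch; the enum values come from sampleprofiler.h below):

    // Cooperative GC mode at suspension time => report the sample as Managed;
    // preemptive mode => report it as External (native or runtime code).
    SampleProfilerSampleType sampleType = pTargetThread->GetGCModeOnSuspension()
        ? SampleProfilerSampleType::Managed    // == 2
        : SampleProfilerSampleType::External;  // == 1
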
diff --git a/src/vm/sampleprofiler.h b/src/vm/sampleprofiler.h
index 2c7466f4c9..02eb6b39cd 100644
--- a/src/vm/sampleprofiler.h
+++ b/src/vm/sampleprofiler.h
@@ -5,11 +5,25 @@
#ifndef __SAMPLEPROFILER_H__
#define __SAMPLEPROFILER_H__
+#ifdef FEATURE_PERFTRACING
+
#include "common.h"
#include "eventpipe.h"
+enum class SampleProfilerSampleType
+{
+ Error = 0,
+ External = 1,
+ Managed = 2
+};
+
class SampleProfiler
{
+
+ // Declare friends.
+ friend class EventPipe;
+ friend class SampleProfilerEventInstance;
+
public:
// Enable profiling.
@@ -18,13 +32,16 @@ class SampleProfiler
// Disable profiling.
static void Disable();
+ // Set the sampling rate.
+ static void SetSamplingRate(long nanoseconds);
+
private:
// Iterate through all managed threads and walk all stacks.
static void WalkManagedThreads();
// Profiling thread proc. Invoked on a new thread when profiling is enabled.
- static DWORD WINAPI SampleProfiler::ThreadProc(void *args);
+ static DWORD WINAPI ThreadProc(void *args);
// True when profiling is enabled.
static Volatile<BOOL> s_profilingEnabled;
@@ -32,13 +49,25 @@ class SampleProfiler
// The sampling thread.
static Thread *s_pSamplingThread;
+ // The provider and event emitted by the profiler.
+ static const GUID s_providerID;
+ static EventPipeProvider *s_pEventPipeProvider;
+ static EventPipeEvent *s_pThreadTimeEvent;
+
+ // Event payloads.
+ // External represents a sample in external or native code.
+ // Managed represents a sample in managed code.
+ static BYTE *s_pPayloadExternal;
+ static BYTE *s_pPayloadManaged;
+ static const unsigned int c_payloadSize = sizeof(unsigned int);
+
// Thread shutdown event for synchronization between Disable() and the sampling thread.
static CLREventStatic s_threadShutdownEvent;
-#ifdef FEATURE_PAL
// The sampling rate.
static long s_samplingRateInNs;
-#endif
};
+#endif // FEATURE_PERFTRACING
+
#endif // __SAMPLEPROFILER_H__
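
SetSamplingRate is declared LIMITED_METHOD_CONTRACT, i.e. a plain unsynchronized store, so a caller is expected to set the rate before the profiler starts. A hypothetical call sequence (Enable is assumed to take no arguments; real callers sit behind EventPipe and hold its lock):

    // Hypothetical call sequence; illustrative only.
    SampleProfiler::SetSamplingRate(500000); // sample every 0.5 ms
    SampleProfiler::Enable();                // spawns the sampling thread
    // ... run the workload being profiled ...
    SampleProfiler::Disable();               // requires the EventPipe lock per the new PRECONDITION
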
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index c36232ecb5..df8916c1f9 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -54,6 +54,10 @@
#include "olecontexthelpers.h"
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#ifdef FEATURE_PERFTRACING
+#include "eventpipebuffermanager.h"
+#endif // FEATURE_PERFTRACING
+
SPTR_IMPL(ThreadStore, ThreadStore, s_pThreadStore);
@@ -988,6 +992,16 @@ void DestroyThread(Thread *th)
th->SetThreadState(Thread::TS_ReportDead);
th->OnThreadTerminate(FALSE);
}
+
+#ifdef FEATURE_PERFTRACING
+ // Before the thread dies, mark its buffers as no longer owned
+ // so that they can be cleaned up after it exits.
+ EventPipeBufferList *pBufferList = th->GetEventPipeBufferList();
+ if(pBufferList != NULL)
+ {
+ pBufferList->SetOwnedByThread(false);
+ }
+#endif // FEATURE_PERFTRACING
}
//-------------------------------------------------------------------------
@@ -1084,6 +1098,16 @@ HRESULT Thread::DetachThread(BOOL fDLLThreadDetach)
m_pClrDebugState = NULL;
#endif //ENABLE_CONTRACTS_DATA
+#ifdef FEATURE_PERFTRACING
+ // Before the thread dies, mark its buffers as no longer owned
+ // so that they can be cleaned up after it exits.
+ EventPipeBufferList *pBufferList = m_pEventPipeBufferList.Load();
+ if(pBufferList != NULL)
+ {
+ pBufferList->SetOwnedByThread(false);
+ }
+#endif // FEATURE_PERFTRACING
+
FastInterlockOr((ULONG*)&m_State, (int) (Thread::TS_Detached | Thread::TS_ReportDead));
// Do not touch Thread object any more. It may be destroyed.
@@ -2008,6 +2032,11 @@ Thread::Thread()
#endif
m_pAllLoggedTypes = NULL;
+
+#ifdef FEATURE_PERFTRACING
+ m_pEventPipeBufferList = NULL;
+ m_eventWriteInProgress = false;
+#endif // FEATURE_PERFTRACING
m_HijackReturnKind = RT_Illegal;
}
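
DestroyThread and DetachThread both flip the same ownership bit so the buffer manager can later reclaim buffers belonging to threads that have exited. A minimal standalone sketch of that handoff (SetOwnedByThread lives in this patch's eventpipebuffermanager.h; the class below is illustrative):

    #include <atomic>

    class BufferListSketch
    {
        std::atomic<bool> m_ownedByThread{true};

    public:
        // Called from the dying thread: release ownership to the buffer manager.
        void SetOwnedByThread(bool owned)
        {
            m_ownedByThread.store(owned, std::memory_order_release);
        }

        // Called by the buffer manager: only lists no longer owned by a live
        // thread are safe to free.
        bool OwnedByThread() const
        {
            return m_ownedByThread.load(std::memory_order_acquire);
        }
    };
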
diff --git a/src/vm/threads.h b/src/vm/threads.h
index 34fca24c4f..74b239d776 100644
--- a/src/vm/threads.h
+++ b/src/vm/threads.h
@@ -185,6 +185,10 @@ typedef DPTR(PTR_ThreadLocalBlock) PTR_PTR_ThreadLocalBlock;
#include "interoputil.h"
#include "eventtrace.h"
+#ifdef FEATURE_PERFTRACING
+class EventPipeBufferList;
+#endif // FEATURE_PERFTRACING
+
#ifdef CROSSGEN_COMPILE
#include "asmconstants.h"
@@ -5334,6 +5338,61 @@ public:
m_pAllLoggedTypes = pAllLoggedTypes;
}
+#ifdef FEATURE_PERFTRACING
+private:
+ // The object that contains the list write buffers used by this thread.
+ Volatile<EventPipeBufferList*> m_pEventPipeBufferList;
+
+ // Whether or not the thread is currently writing an event.
+ Volatile<bool> m_eventWriteInProgress;
+
+ // SampleProfiler thread state. This is set on suspension and cleared before restart.
+ // True if the thread was in cooperative mode. False if it was in preemptive mode when the suspension started.
+ Volatile<ULONG> m_gcModeOnSuspension;
+
+public:
+ EventPipeBufferList* GetEventPipeBufferList()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_pEventPipeBufferList;
+ }
+
+ void SetEventPipeBufferList(EventPipeBufferList *pList)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pEventPipeBufferList = pList;
+ }
+
+ bool GetEventWriteInProgress() const
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_eventWriteInProgress;
+ }
+
+ void SetEventWriteInProgress(bool value)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_eventWriteInProgress = value;
+ }
+
+ bool GetGCModeOnSuspension()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return m_gcModeOnSuspension;
+ }
+
+ void SaveGCModeOnSuspension()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_gcModeOnSuspension = m_fPreemptiveGCDisabled;
+ }
+
+ void ClearGCModeOnSuspension()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_gcModeOnSuspension = 0;
+ }
+#endif // FEATURE_PERFTRACING
+
#ifdef FEATURE_HIJACK
private:
diff --git a/src/vm/threadsuspend.cpp b/src/vm/threadsuspend.cpp
index 65495940ed..36eb9f3698 100644
--- a/src/vm/threadsuspend.cpp
+++ b/src/vm/threadsuspend.cpp
@@ -7514,6 +7514,11 @@ void HandleGCSuspensionForInterruptedThread(CONTEXT *interruptedContext)
if (pThread->PreemptiveGCDisabled() != TRUE)
return;
+#ifdef FEATURE_PERFTRACING
+ // Record that the thread was in cooperative (managed) mode when it was interrupted.
+ pThread->SaveGCModeOnSuspension();
+#endif // FEATURE_PERFTRACING
+
PCODE ip = GetIP(interruptedContext);
// This function can only be called when the interrupted thread is in
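
Putting the threads.h accessors and this hook together, the flag makes a full round trip per sample: the suspension hook saves the GC mode, WalkManagedThreads reads it to pick a payload, and the sampler clears it before the EE restarts. A standalone sketch of that round trip (std::atomic stands in for Volatile<ULONG>):

    #include <atomic>

    std::atomic<unsigned long> g_gcModeOnSuspension{0}; // stand-in for m_gcModeOnSuspension

    void OnSuspensionSketch(bool preemptiveGCDisabled)
    {
        // Mirrors SaveGCModeOnSuspension(): nonzero means cooperative (managed) mode.
        g_gcModeOnSuspension.store(preemptiveGCDisabled ? 1 : 0);
    }

    bool WasInManagedCodeSketch()
    {
        // Mirrors GetGCModeOnSuspension(): read by the sampler while threads are suspended.
        return g_gcModeOnSuspension.load() != 0;
    }

    void OnSampleWrittenSketch()
    {
        // Mirrors ClearGCModeOnSuspension(): reset before the EE restarts.
        g_gcModeOnSuspension.store(0);
    }
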
diff --git a/src/vm/tieredcompilation.cpp b/src/vm/tieredcompilation.cpp
index 2032e66f1b..acc26b90a5 100644
--- a/src/vm/tieredcompilation.cpp
+++ b/src/vm/tieredcompilation.cpp
@@ -337,7 +337,11 @@ void TieredCompilationManager::InstallMethodCode(MethodDesc* pMethod, PCODE pCod
_ASSERTE(!pMethod->IsNativeCodeStableAfterInit());
PCODE pExistingCode = pMethod->GetNativeCode();
+#ifdef FEATURE_INTERPRETER
+ if (!pMethod->SetNativeCodeInterlocked(pCode, pExistingCode, TRUE))
+#else
if (!pMethod->SetNativeCodeInterlocked(pCode, pExistingCode))
+#endif
{
// We aren't there yet, but once the feature is finished we shouldn't be racing against any other code mutator,
// so there would be no reason for this to fail.
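
SetNativeCodeInterlocked publishes the new code address only if the slot still holds the expected previous address, so a losing racer backs off instead of overwriting. A sketch of that compare-exchange contract (names are hypothetical; the FEATURE_INTERPRETER overload merely adds a flag argument):

    #include <atomic>
    #include <cstdint>

    using PCODE = uintptr_t; // code address, as in the runtime

    std::atomic<PCODE> g_nativeCodeSlot{0};

    // Returns true only if no other code mutator updated the slot first,
    // mirroring the SetNativeCodeInterlocked(pCode, pExpected) contract.
    bool TrySetNativeCodeSketch(PCODE pCode, PCODE pExpected)
    {
        return g_nativeCodeSlot.compare_exchange_strong(pExpected, pCode);
    }
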
diff --git a/src/vm/typedesc.cpp b/src/vm/typedesc.cpp
index d05cb558bc..06170cb2c1 100644
--- a/src/vm/typedesc.cpp
+++ b/src/vm/typedesc.cpp
@@ -441,15 +441,17 @@ BOOL TypeDesc::CanCastTo(TypeHandle toType, TypeHandlePairList *pVisited)
// then we must be trying to cast to a class or interface type.
if (!toType.IsTypeDesc())
{
- MethodTable *pMT = GetMethodTable();
- if (pMT == 0) {
- // I don't have an underlying method table, therefore I'm
- // a variable type, pointer type, function pointer type
+ if (!IsArray())
+ {
+ // I am a variable type, pointer type, function pointer type
// etc. I am not an object or value type. Therefore
// I can't be cast to an object or value type.
return FALSE;
}
+ MethodTable *pMT = GetMethodTable();
+ _ASSERTE(pMT != 0);
+
// This does the right thing if 'type' == System.Array or System.Object, System.Clonable ...
if (pMT->CanCastToClassOrInterface(toType.AsMethodTable(), pVisited) != 0)
{
@@ -609,15 +611,17 @@ TypeHandle::CastResult TypeDesc::CanCastToNoGC(TypeHandle toType)
// then we must be trying to cast to a class or interface type.
if (!toType.IsTypeDesc())
{
- MethodTable *pMT = GetMethodTable();
- if (pMT == 0) {
- // I don't have an underlying method table, therefore I'm
- // a variable type, pointer type, function pointer type
+ if (!IsArray())
+ {
+ // I am a variable type, pointer type, function pointer type
// etc. I am not an object or value type. Therefore
// I can't be cast to an object or value type.
return TypeHandle::CannotCast;
}
+ MethodTable *pMT = GetMethodTable();
+ _ASSERTE(pMT != 0);
+
// This does the right thing if 'type' == System.Array or System.Object, System.Clonable ...
return pMT->CanCastToClassOrInterfaceNoGC(toType.AsMethodTable());
}
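
Both rewrites lean on the same invariant: among TypeDescs, only arrays carry a MethodTable, so checking IsArray() first lets the subsequent GetMethodTable() be asserted non-null instead of null-checked. A standalone sketch of the reordered control flow (types illustrative):

    #include <cassert>

    struct MethodTableSketch; // opaque here

    struct TypeDescSketch
    {
        bool isArray;
        MethodTableSketch *pMT; // non-null exactly when isArray is true

        bool CanCastToClassOrInterfaceSketch()
        {
            if (!isArray)
            {
                // Variable, pointer, and function-pointer TypeDescs have no
                // MethodTable, so they can never cast to a class or interface.
                return false;
            }
            assert(pMT != nullptr); // the invariant the patch relies on
            // ... delegate the real check to pMT ...
            return true;
        }
    };
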
diff --git a/src/vm/win32threadpool.cpp b/src/vm/win32threadpool.cpp
index bc84762b06..a79656e745 100644
--- a/src/vm/win32threadpool.cpp
+++ b/src/vm/win32threadpool.cpp
@@ -4755,15 +4755,30 @@ DWORD ThreadpoolMgr::FireTimers()
timerInfo,
QUEUE_ONLY /* TimerInfo takes care of deleting */);
- timerInfo->FiringTime = currentTime+timerInfo->Period;
+ if (timerInfo->Period != 0 && timerInfo->Period != (ULONG)-1)
+ {
+ ULONG nextFiringTime = timerInfo->FiringTime + timerInfo->Period;
+ DWORD firingInterval;
+ if (TimeExpired(timerInfo->FiringTime, currentTime, nextFiringTime))
+ {
+ // Enough time has elapsed to fire the timer yet again. The timer cannot keep up with the short
+ // period, so have it fire 1 ms from now to avoid spinning without a delay.
+ timerInfo->FiringTime = currentTime + 1;
+ firingInterval = 1;
+ }
+ else
+ {
+ timerInfo->FiringTime = nextFiringTime;
+ firingInterval = TimeInterval(nextFiringTime, currentTime);
+ }
- if ((timerInfo->Period != 0) && (timerInfo->Period != (ULONG) -1) && (nextFiringInterval > timerInfo->Period))
- nextFiringInterval = timerInfo->Period;
+ if (firingInterval < nextFiringInterval)
+ nextFiringInterval = firingInterval;
+ }
}
-
else
{
- DWORD firingInterval = TimeInterval(timerInfo->FiringTime,currentTime);
+ DWORD firingInterval = TimeInterval(timerInfo->FiringTime, currentTime);
if (firingInterval < nextFiringInterval)
nextFiringInterval = firingInterval;
}
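
The rescheduling above must survive 32-bit tick-counter wraparound, which is why it goes through TimeExpired/TimeInterval rather than comparing raw DWORDs. A simplified two-argument sketch of that modular-arithmetic style (the real TimeExpired also takes the previous firing time as a third argument; helper names here are hypothetical), valid while intervals stay under 2^31 ticks:

    #include <cstdint>

    // True if 'now' has reached or passed 'deadline', tolerating wraparound of a
    // 32-bit tick counter; correct while spans stay under 2^31 ticks.
    bool TickDeadlineExpired(uint32_t now, uint32_t deadline)
    {
        return static_cast<int32_t>(now - deadline) >= 0;
    }

    // Ticks remaining until 'deadline' (0 if already expired), wraparound-safe.
    uint32_t TicksUntil(uint32_t deadline, uint32_t now)
    {
        return TickDeadlineExpired(now, deadline) ? 0 : (deadline - now);
    }
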