Diffstat (limited to 'src/vm')
-rw-r--r--  src/vm/CMakeLists.txt | 31
-rw-r--r--  src/vm/amd64/asmhelpers.S | 308
-rw-r--r--  src/vm/amd64/cgenamd64.cpp | 13
-rw-r--r--  src/vm/amd64/cgencpu.h | 1
-rw-r--r--  src/vm/amd64/profiler.cpp | 25
-rw-r--r--  src/vm/amd64/unixstubs.cpp | 15
-rw-r--r--  src/vm/appdomain.cpp | 61
-rw-r--r--  src/vm/appdomain.hpp | 35
-rw-r--r--  src/vm/appdomainnative.cpp | 1
-rw-r--r--  src/vm/arm/armsinglestepper.cpp | 12
-rw-r--r--  src/vm/arm/cgencpu.h | 5
-rw-r--r--  src/vm/arm/stubs.cpp | 40
-rw-r--r--  src/vm/arm64/asmconstants.h | 34
-rw-r--r--  src/vm/arm64/asmhelpers.S | 278
-rw-r--r--  src/vm/arm64/asmhelpers.asm | 104
-rw-r--r--  src/vm/arm64/cgencpu.h | 11
-rw-r--r--  src/vm/arm64/stubs.cpp | 52
-rw-r--r--  src/vm/arm64/virtualcallstubcpu.hpp | 4
-rw-r--r--  src/vm/armsinglestepper.h | 4
-rw-r--r--  src/vm/array.cpp | 11
-rw-r--r--  src/vm/assembly.cpp | 1
-rw-r--r--  src/vm/assemblyname.cpp | 6
-rw-r--r--  src/vm/assemblynative.cpp | 35
-rw-r--r--  src/vm/assemblynative.hpp | 5
-rw-r--r--  src/vm/assemblyspec.cpp | 81
-rw-r--r--  src/vm/binder.cpp | 101
-rw-r--r--  src/vm/binder.h | 1
-rw-r--r--  src/vm/callcounter.cpp | 19
-rw-r--r--  src/vm/callcounter.h | 3
-rw-r--r--  src/vm/ceeload.cpp | 314
-rw-r--r--  src/vm/ceeload.h | 148
-rw-r--r--  src/vm/ceeload.inl | 14
-rw-r--r--  src/vm/ceemain.cpp | 26
-rw-r--r--  src/vm/class.cpp | 247
-rw-r--r--  src/vm/class.h | 162
-rw-r--r--  src/vm/class.inl | 18
-rw-r--r--  src/vm/classcompat.cpp | 7
-rw-r--r--  src/vm/classnames.h | 1
-rw-r--r--  src/vm/clrex.cpp | 2
-rw-r--r--  src/vm/clrex.h | 2
-rw-r--r--  src/vm/clsload.cpp | 456
-rw-r--r--  src/vm/clsload.hpp | 47
-rw-r--r--  src/vm/clsload.inl | 4
-rw-r--r--  src/vm/codeman.cpp | 40
-rw-r--r--  src/vm/codeman.h | 4
-rw-r--r--  src/vm/codepitchingmanager.cpp | 522
-rw-r--r--  src/vm/codeversion.cpp | 2862
-rw-r--r--  src/vm/codeversion.h | 689
-rw-r--r--  src/vm/comcallablewrapper.cpp | 43
-rw-r--r--  src/vm/comcallablewrapper.h | 8
-rw-r--r--  src/vm/comdelegate.cpp | 182
-rw-r--r--  src/vm/comdelegate.h | 6
-rw-r--r--  src/vm/commodule.cpp | 1
-rw-r--r--  src/vm/compile.cpp | 1
-rw-r--r--  src/vm/comsynchronizable.cpp | 50
-rw-r--r--  src/vm/comsynchronizable.h | 1
-rw-r--r--  src/vm/comthreadpool.cpp | 1
-rw-r--r--  src/vm/comtoclrcall.cpp | 76
-rw-r--r--  src/vm/comtoclrcall.h | 16
-rw-r--r--  src/vm/comutilnative.cpp | 374
-rw-r--r--  src/vm/comutilnative.h | 17
-rw-r--r--  src/vm/crossgen/CMakeLists.txt | 2
-rw-r--r--  src/vm/crossgen_mscorlib/CMakeLists.txt | 1
-rw-r--r--  src/vm/crossgencompile.cpp | 5
-rw-r--r--  src/vm/crst.h | 9
-rw-r--r--  src/vm/customattribute.cpp | 1
-rw-r--r--  src/vm/dataimage.cpp | 6
-rw-r--r--  src/vm/dataimage.h | 78
-rw-r--r--  src/vm/debughelp.cpp | 2
-rw-r--r--  src/vm/dispatchinfo.cpp | 45
-rw-r--r--  src/vm/dllimport.cpp | 43
-rw-r--r--  src/vm/dllimport.h | 7
-rw-r--r--  src/vm/dllimportcallback.cpp | 12
-rw-r--r--  src/vm/dllimportcallback.h | 4
-rw-r--r--  src/vm/domainfile.cpp | 5
-rw-r--r--  src/vm/dynamicmethod.cpp | 1
-rw-r--r--  src/vm/dynamicmethod.h | 7
-rw-r--r--  src/vm/ecall.cpp | 8
-rw-r--r--  src/vm/ecall.h | 1
-rw-r--r--  src/vm/ecalllist.h | 16
-rw-r--r--  src/vm/eeconfig.cpp | 23
-rw-r--r--  src/vm/eeconfig.h | 25
-rw-r--r--  src/vm/eventpipe.cpp | 268
-rw-r--r--  src/vm/eventpipe.h | 85
-rw-r--r--  src/vm/eventpipebuffer.cpp | 11
-rw-r--r--  src/vm/eventpipebuffer.h | 3
-rw-r--r--  src/vm/eventpipebuffermanager.cpp | 71
-rw-r--r--  src/vm/eventpipebuffermanager.h | 4
-rw-r--r--  src/vm/eventpipeconfiguration.cpp | 67
-rw-r--r--  src/vm/eventpipeconfiguration.h | 8
-rw-r--r--  src/vm/eventpipeeventinstance.cpp | 12
-rw-r--r--  src/vm/eventpipefile.cpp | 3
-rw-r--r--  src/vm/eventpipeprovider.cpp | 13
-rw-r--r--  src/vm/eventpipeprovider.h | 9
-rw-r--r--  src/vm/eventtrace.cpp | 6
-rw-r--r--  src/vm/exceptionhandling.cpp | 35
-rw-r--r--  src/vm/fastserializableobject.h | 35
-rw-r--r--  src/vm/fastserializer.cpp | 4
-rw-r--r--  src/vm/finalizerthread.cpp | 2
-rw-r--r--  src/vm/frames.cpp | 1
-rw-r--r--  src/vm/gccover.cpp | 10
-rw-r--r--  src/vm/gcenv.ee.cpp | 18
-rw-r--r--  src/vm/gchandleutilities.h | 5
-rw-r--r--  src/vm/gdbjit.cpp | 1354
-rw-r--r--  src/vm/gdbjit.h | 36
-rw-r--r--  src/vm/genericdict.cpp | 2
-rw-r--r--  src/vm/generics.cpp | 11
-rw-r--r--  src/vm/genmeth.cpp | 28
-rw-r--r--  src/vm/hosting.cpp | 15
-rw-r--r--  src/vm/i386/cgencpu.h | 1
-rw-r--r--  src/vm/i386/cgenx86.cpp | 8
-rw-r--r--  src/vm/i386/stublinkerx86.cpp | 3
-rw-r--r--  src/vm/ilinstrumentation.cpp | 90
-rw-r--r--  src/vm/ilinstrumentation.h | 116
-rw-r--r--  src/vm/interpreter.cpp | 106
-rw-r--r--  src/vm/interpreter.h | 3
-rw-r--r--  src/vm/invokeutil.cpp | 3
-rw-r--r--  src/vm/invokeutil.h | 8
-rw-r--r--  src/vm/jithelpers.cpp | 10
-rw-r--r--  src/vm/jitinterface.cpp | 505
-rw-r--r--  src/vm/jitinterface.h | 19
-rw-r--r--  src/vm/listlock.cpp | 96
-rw-r--r--  src/vm/listlock.h | 179
-rw-r--r--  src/vm/listlock.inl | 51
-rw-r--r--  src/vm/loaderallocator.cpp | 4
-rw-r--r--  src/vm/loaderallocator.hpp | 4
-rw-r--r--  src/vm/marshalnative.cpp | 1
-rw-r--r--  src/vm/memberload.cpp | 2
-rw-r--r--  src/vm/metasig.h | 15
-rw-r--r--  src/vm/method.cpp | 238
-rw-r--r--  src/vm/method.hpp | 408
-rw-r--r--  src/vm/method.inl | 14
-rw-r--r--  src/vm/methodtable.cpp | 226
-rw-r--r--  src/vm/methodtable.h | 218
-rw-r--r--  src/vm/methodtable.inl | 76
-rw-r--r--  src/vm/methodtablebuilder.cpp | 285
-rw-r--r--  src/vm/methodtablebuilder.h | 2
-rw-r--r--  src/vm/mngstdinterfaces.cpp | 4
-rw-r--r--  src/vm/mscorlib.h | 25
-rw-r--r--  src/vm/multicorejit.cpp | 4
-rw-r--r--  src/vm/multicorejit.h | 2
-rw-r--r--  src/vm/multicorejitplayer.cpp | 28
-rw-r--r--  src/vm/object.h | 6
-rw-r--r--  src/vm/olevariant.cpp | 25
-rw-r--r--  src/vm/pefile.cpp | 1
-rw-r--r--  src/vm/perfmap.cpp | 43
-rw-r--r--  src/vm/perfmap.h | 6
-rw-r--r--  src/vm/precode.cpp | 11
-rw-r--r--  src/vm/prestub.cpp | 1372
-rw-r--r--  src/vm/profilingenumerators.cpp | 2
-rw-r--r--  src/vm/proftoeeinterfaceimpl.cpp | 35
-rw-r--r--  src/vm/readytoruninfo.cpp | 6
-rw-r--r--  src/vm/reflectioninvocation.cpp | 221
-rw-r--r--  src/vm/reflectioninvocation.h | 3
-rw-r--r--  src/vm/rejit.cpp | 3248
-rw-r--r--  src/vm/rejit.h | 482
-rw-r--r--  src/vm/rejit.inl | 253
-rw-r--r--  src/vm/runtimehandles.cpp | 63
-rw-r--r--  src/vm/runtimehandles.h | 10
-rw-r--r--  src/vm/sampleprofiler.cpp | 4
-rw-r--r--  src/vm/sampleprofiler.h | 2
-rw-r--r--  src/vm/security.cpp | 48
-rw-r--r--  src/vm/security.h | 93
-rw-r--r--  src/vm/siginfo.cpp | 10
-rw-r--r--  src/vm/stdinterfaces.cpp | 21
-rw-r--r--  src/vm/stubhelpers.cpp | 3
-rw-r--r--  src/vm/syncblk.inl | 2
-rw-r--r--  src/vm/threadpoolrequest.cpp | 7
-rw-r--r--  src/vm/threadpoolrequest.h | 22
-rw-r--r--  src/vm/threads.cpp | 99
-rw-r--r--  src/vm/threads.h | 71
-rw-r--r--  src/vm/tieredcompilation.cpp | 257
-rw-r--r--  src/vm/tieredcompilation.h | 19
-rw-r--r--  src/vm/typehandle.cpp | 11
-rw-r--r--  src/vm/typehandle.h | 5
-rw-r--r--  src/vm/util.cpp | 39
-rw-r--r--  src/vm/util.hpp | 13
-rw-r--r--  src/vm/vars.cpp | 1
-rw-r--r--  src/vm/vars.hpp | 1
-rw-r--r--  src/vm/virtualcallstub.cpp | 20
-rw-r--r--  src/vm/win32threadpool.cpp | 18
-rw-r--r--  src/vm/win32threadpool.h | 33
182 files changed, 10371 insertions, 9024 deletions
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index adb8409558..f8790cf85d 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -30,9 +30,12 @@ if(FEATURE_GDBJIT)
set(VM_SOURCES_GDBJIT
gdbjit.cpp
)
- add_definitions(-DFEATURE_GDBJIT)
endif(FEATURE_GDBJIT)
+if(FEATURE_JIT_PITCHING)
+ add_definitions(-DFEATURE_JIT_PITCHING)
+endif(FEATURE_JIT_PITCHING)
+
set(VM_SOURCES_DAC_AND_WKS_COMMON
appdomain.cpp
array.cpp
@@ -44,6 +47,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
classhash.cpp
clsload.cpp
codeman.cpp
+ codeversion.cpp
comdelegate.cpp
contractimpl.cpp
coreassemblyspec.cpp
@@ -75,6 +79,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
generics.cpp
hash.cpp
hillclimbing.cpp
+ ilinstrumentation.cpp
ilstubcache.cpp
ilstubresolver.cpp
inlinetracking.cpp
@@ -95,7 +100,6 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
precode.cpp
prestub.cpp
rejit.cpp
- security.cpp
sigformat.cpp
siginfo.cpp
spinlock.cpp
@@ -120,16 +124,10 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
)
set( GC_SOURCES_DAC_AND_WKS_COMMON
- ../gc/gcconfig.cpp
- ../gc/gccommon.cpp
- ../gc/gcscan.cpp
- ../gc/gcsvr.cpp
- ../gc/gcwks.cpp
../gc/handletable.cpp
../gc/handletablecore.cpp
../gc/handletablescan.cpp
- ../gc/objecthandle.cpp
- ../gc/softwarewritewatch.cpp)
+ ../gc/objecthandle.cpp)
if(FEATURE_READYTORUN)
list(APPEND VM_SOURCES_DAC_AND_WKS_COMMON
@@ -137,6 +135,12 @@ if(FEATURE_READYTORUN)
)
endif(FEATURE_READYTORUN)
+if(FEATURE_JIT_PITCHING)
+ list(APPEND VM_SOURCES_DAC_AND_WKS_COMMON
+ codepitchingmanager.cpp
+ )
+endif(FEATURE_JIT_PITCHING)
+
set(VM_SOURCES_DAC
${VM_SOURCES_DAC_AND_WKS_COMMON}
contexts.cpp
@@ -196,7 +200,6 @@ set(VM_SOURCES_WKS
gccover.cpp
gcenv.ee.static.cpp
gcenv.ee.common.cpp
- gcenv.os.cpp
gchelpers.cpp
genmeth.cpp
hosting.cpp
@@ -207,7 +210,6 @@ set(VM_SOURCES_WKS
interpreter.cpp
invokeutil.cpp
jithelpers.cpp
- listlock.cpp
managedmdimport.cpp
marshalnative.cpp
marvin32.cpp
@@ -260,9 +262,15 @@ set(VM_SOURCES_WKS
set(GC_SOURCES_WKS
${GC_SOURCES_DAC_AND_WKS_COMMON}
+ ../gc/gcconfig.cpp
+ ../gc/gccommon.cpp
+ ../gc/gcscan.cpp
+ ../gc/gcsvr.cpp
+ ../gc/gcwks.cpp
../gc/gchandletable.cpp
../gc/gceesvr.cpp
../gc/gceewks.cpp
+ ../gc/softwarewritewatch.cpp
../gc/handletablecache.cpp)
if(FEATURE_EVENT_TRACE)
@@ -386,6 +394,7 @@ else(WIN32)
if(CLR_CMAKE_TARGET_ARCH_AMD64)
set(VM_SOURCES_WKS_ARCH_ASM
+ ${ARCH_SOURCES_DIR}/asmhelpers.S
${ARCH_SOURCES_DIR}/calldescrworkeramd64.S
${ARCH_SOURCES_DIR}/crthelpers.S
${ARCH_SOURCES_DIR}/externalmethodfixupthunk.S
diff --git a/src/vm/amd64/asmhelpers.S b/src/vm/amd64/asmhelpers.S
new file mode 100644
index 0000000000..78b5185eed
--- /dev/null
+++ b/src/vm/amd64/asmhelpers.S
@@ -0,0 +1,308 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+#define real4 dword
+#define real8 qword
+
+//
+// file: profile.cpp
+// typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA
+// {
+// FunctionID *functionId; // function ID comes in the r11 register
+// void *rbp;
+// void *probersp;
+// void *ip;
+// void *profiledRsp;
+// UINT64 rax;
+// LPVOID hiddenArg;
+// UINT64 flt0;
+// UINT64 flt1;
+// UINT64 flt2;
+// UINT64 flt3;
+// UINT64 flt4;
+// UINT64 flt5;
+// UINT64 flt6;
+// UINT64 flt7;
+// UINT64 rdi;
+// UINT64 rsi;
+// UINT64 rdx;
+// UINT64 rcx;
+// UINT64 r8;
+// UINT64 r9;
+// UINT32 flags;
+// } PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA;
+//
+.equ SIZEOF_PROFILE_PLATFORM_SPECIFIC_DATA, 0x8*22 + 0x8 // includes fudge to make FP_SPILL right
+.equ SIZEOF_FP_ARG_SPILL, 0x10*2
+
+.equ SIZEOF_STACK_FRAME, SIZEOF_PROFILE_PLATFORM_SPECIFIC_DATA + SIZEOF_FP_ARG_SPILL
+
+.equ PROFILE_ENTER, 0x1
+.equ PROFILE_LEAVE, 0x2
+.equ PROFILE_TAILCALL, 0x4
+
+// ***********************************************************
+// NOTE:
+//
+// Register preservation scheme:
+//
+// Preserved:
+// - all non-volatile registers
+// - rax, rdx
+// - xmm0, xmm1
+//
+// Not Preserved:
+// - integer argument registers (rcx, rdx, r8, r9)
+// - floating point argument registers (xmm1-3)
+// - volatile integer registers (r10, r11)
+// - volatile floating point registers (xmm4-5)
+// - upper halves of ymm registers on AVX (which are volatile)
+//
+// ***********************************************************
+
+// EXTERN_C void ProfileEnterNaked(FunctionIDOrClientID functionIDOrClientID, size_t profiledRsp);
+// <NOTE>
+//
+// </NOTE>
+NESTED_ENTRY ProfileEnterNaked, _TEXT, NoHandler
+ // Upon entry :
+ // r14 = clientInfo
+ // r15 = profiledRsp
+
+ push_nonvol_reg rax
+
+ lea rax, [rsp + 0x10] // caller rsp
+ mov r10, [rax - 0x8] // return address
+
+ push_argument_register rdx
+ alloc_stack SIZEOF_STACK_FRAME
+
+ // correctness of return value in structure doesn't matter for enter probe
+
+ // setup ProfilePlatformSpecificData structure
+ xor r11, r11 // nullify r11
+ mov [rsp + 0x0], r11 // r11 is null -- struct functionId field
+ save_reg_postrsp rbp, 0x8 // -- struct rbp field
+ mov [rsp + 0x10], rax // caller rsp -- struct probeRsp field
+ mov [rsp + 0x18], r10 // return address -- struct ip field
+ mov [rsp + 0x20], r15 // -- struct profiledRsp field
+ mov [rsp + 0x28], r11 // return value -- struct rax field
+ mov [rsp + 0x30], r11 // r11 is null -- struct hiddenArg field
+ movsd real8 ptr [rsp + 0x38], xmm0 // -- struct flt0 field
+ movsd real8 ptr [rsp + 0x40], xmm1 // -- struct flt1 field
+ movsd real8 ptr [rsp + 0x48], xmm2 // -- struct flt2 field
+ movsd real8 ptr [rsp + 0x50], xmm3 // -- struct flt3 field
+ movsd real8 ptr [rsp + 0x58], xmm4 // -- struct flt4 field
+ movsd real8 ptr [rsp + 0x60], xmm5 // -- struct flt5 field
+ movsd real8 ptr [rsp + 0x68], xmm6 // -- struct flt6 field
+ movsd real8 ptr [rsp + 0x70], xmm7 // -- struct flt7 field
+ mov [rsp + 0x78], rdi // -- struct rdi field
+ mov [rsp + 0x80], rsi // -- struct rsi field
+ mov [rsp + 0x88], rdx // -- struct rdx field
+ mov [rsp + 0x90], rcx // -- struct rcx field
+ mov [rsp + 0x98], r8 // -- struct r8 field
+ mov [rsp + 0xa0], r9 // -- struct r9 field
+ mov r10, 0x1 // PROFILE_ENTER
+ mov [rsp + 0xa8], r10d // -- struct flags field
+
+ // get aligned stack ptr (rsp + FRAME_SIZE) & (-16)
+ lea rax, [rsp + 0xb8]
+ and rax, -16
+
+ // we need to be able to restore the fp return register
+ // save fp return registers
+ movdqa [rax + 0x00], xmm0
+ movdqa [rax + 0x10], xmm1
+
+ END_PROLOGUE
+
+ // rdi already contains the clientInfo
+ mov rdi, r14
+ lea rsi, [rsp + 0x0]
+ call C_FUNC(ProfileEnter)
+
+ // restore fp return registers
+ lea rax, [rsp + 0xb8]
+ and rax, -16
+ movdqa xmm0, [rax + 0x00]
+ movdqa xmm1, [rax + 0x10]
+
+ // restore arg registers
+ mov rdi, [rsp + 0x78]
+ mov rsi, [rsp + 0x80]
+ mov rdx, [rsp + 0x88]
+ mov rcx, [rsp + 0x90]
+ mov r8, [rsp + 0x98]
+ mov r9, [rsp + 0xa0]
+
+ // begin epilogue
+ free_stack SIZEOF_STACK_FRAME
+ pop_argument_register rdx
+
+ pop_nonvol_reg rax
+
+ ret
+NESTED_END ProfileEnterNaked, _TEXT
+
+// EXTERN_C void ProfileLeaveNaked(FunctionIDOrClientID functionIDOrClientID, size_t profiledRsp);
+// <NOTE>
+//
+// </NOTE>
+NESTED_ENTRY ProfileLeaveNaked, _TEXT, NoHandler
+// Upon entry :
+// rdi = clientInfo
+// rsi = profiledRsp
+
+ push_nonvol_reg rbx
+
+ lea rbx, [rsp + 0x10] // caller rsp
+ mov r10, [rbx - 0x8] // return address
+
+ // rdx should be saved here because it can be used for returning struct values
+ push_argument_register rdx
+ alloc_stack SIZEOF_STACK_FRAME
+
+ // correctness of argument registers in structure doesn't matter for leave probe
+
+ // setup ProfilePlatformSpecificData structure
+ xor r11, r11 // nullify r11
+ mov [rsp + 0x0], r11 // r11 is null -- struct functionId field
+ save_reg_postrsp rbp, 0x8 // -- struct rbp field
+ mov [rsp + 0x10], rbx // caller rsp -- struct probeRsp field
+ mov [rsp + 0x18], r10 // return address -- struct ip field
+ mov [rsp + 0x20], rsi // -- struct profiledRsp field
+ mov [rsp + 0x28], rax // return value -- struct rax field
+ mov [rsp + 0x30], r11 // r11 is null -- struct hiddenArg field
+ movsd real8 ptr [rsp + 0x38], xmm0 // -- struct flt0 field
+ movsd real8 ptr [rsp + 0x40], xmm1 // -- struct flt1 field
+ movsd real8 ptr [rsp + 0x48], xmm2 // -- struct flt2 field
+ movsd real8 ptr [rsp + 0x50], xmm3 // -- struct flt3 field
+ movsd real8 ptr [rsp + 0x58], xmm4 // -- struct flt4 field
+ movsd real8 ptr [rsp + 0x60], xmm5 // -- struct flt5 field
+ movsd real8 ptr [rsp + 0x68], xmm6 // -- struct flt6 field
+ movsd real8 ptr [rsp + 0x70], xmm7 // -- struct flt7 field
+ mov [rsp + 0x78], r11 // -- struct rdi field
+ mov [rsp + 0x80], r11 // -- struct rsi field
+ mov [rsp + 0x88], r11 // -- struct rdx field
+ mov [rsp + 0x90], r11 // -- struct rcx field
+ mov [rsp + 0x98], r11 // -- struct r8 field
+ mov [rsp + 0xa0], r11 // -- struct r9 field
+ mov r10, 0x2 // PROFILE_LEAVE
+ mov [rsp + 0xa8], r10d // flags -- struct flags field
+
+ // get aligned stack ptr (rsp + FRAME_SIZE) & (-16)
+ lea rax, [rsp + 0xb8]
+ and rax, -16
+
+ // we need to be able to restore the fp return register
+ // save fp return registers
+ movdqa [rax + 0x00], xmm0
+ movdqa [rax + 0x10], xmm1
+
+ END_PROLOGUE
+
+ // rdi already contains the clientInfo
+ lea rsi, [rsp + 0x0]
+ call C_FUNC(ProfileLeave)
+
+ // restore fp return registers
+ lea rax, [rsp + 0xb8]
+ and rax, -16
+ movdqa xmm0, [rax + 0x00]
+ movdqa xmm1, [rax + 0x10]
+
+ // restore int return register
+ mov rax, [rsp + 0x28]
+
+ // begin epilogue
+ free_stack SIZEOF_STACK_FRAME
+ pop_argument_register rdx
+
+ pop_nonvol_reg rbx
+
+ ret
+NESTED_END ProfileLeaveNaked, _TEXT
+
+// EXTERN_C void ProfileTailcallNaked(FunctionIDOrClientID functionIDOrClientID, size_t profiledRsp);
+// <NOTE>
+//
+// </NOTE>
+NESTED_ENTRY ProfileTailcallNaked, _TEXT, NoHandler
+// Upon entry :
+// rdi = clientInfo
+// rsi = profiledRsp
+
+ push_nonvol_reg rbx
+
+ lea rbx, [rsp + 0x10] // caller rsp
+ mov r10, [rbx - 0x8] // return address
+
+ // rdx should be saved here because it can be used for returning struct values
+ push_argument_register rdx
+ alloc_stack SIZEOF_STACK_FRAME
+
+ // correctness of argument registers in structure doesn't matter for tailcall probe
+
+ // setup ProfilePlatformSpecificData structure
+ xor r11, r11 // nullify r11
+ mov [rsp + 0x0], r11 // r11 is null -- struct functionId field
+ save_reg_postrsp rbp, 0x8 // -- struct rbp field
+ mov [rsp + 0x10], rbx // caller rsp -- struct probeRsp field
+ mov [rsp + 0x18], r10 // return address -- struct ip field
+ mov [rsp + 0x20], rsi // -- struct profiledRsp field
+ mov [rsp + 0x28], rax // return value -- struct rax field
+ mov [rsp + 0x30], r11 // r11 is null -- struct hiddenArg field
+ movsd real8 ptr [rsp + 0x38], xmm0 // -- struct flt0 field
+ movsd real8 ptr [rsp + 0x40], xmm1 // -- struct flt1 field
+ movsd real8 ptr [rsp + 0x48], xmm2 // -- struct flt2 field
+ movsd real8 ptr [rsp + 0x50], xmm3 // -- struct flt3 field
+ movsd real8 ptr [rsp + 0x58], xmm4 // -- struct flt4 field
+ movsd real8 ptr [rsp + 0x60], xmm5 // -- struct flt5 field
+ movsd real8 ptr [rsp + 0x68], xmm6 // -- struct flt6 field
+ movsd real8 ptr [rsp + 0x70], xmm7 // -- struct flt7 field
+ mov [rsp + 0x78], r11 // -- struct rdi field
+ mov [rsp + 0x80], r11 // -- struct rsi field
+ mov [rsp + 0x88], r11 // -- struct rdx field
+ mov [rsp + 0x90], r11 // -- struct rcx field
+ mov [rsp + 0x98], r11 // -- struct r8 field
+ mov [rsp + 0xa0], r11 // -- struct r9 field
+ mov r10, 0x4 // PROFILE_TAILCALL
+ mov [rsp + 0xa8], r10d // flags -- struct flags field
+
+ // get aligned stack ptr (rsp + FRAME_SIZE) & (-16)
+ lea rax, [rsp + 0xb8]
+ and rax, -16
+
+ // we need to be able to restore the fp return register
+ // save fp return registers
+ movdqa [rax + 0x00], xmm0
+ movdqa [rax + 0x10], xmm1
+
+ END_PROLOGUE
+
+ // rdi already contains the clientInfo
+ lea rsi, [rsp + 0x0]
+ call C_FUNC(ProfileTailcall)
+
+ // restore fp return registers
+ lea rax, [rsp + 0xb8]
+ and rax, -16
+ movdqa xmm0, [rax + 0x00]
+ movdqa xmm1, [rax + 0x10]
+
+ // restore int return register
+ mov rax, [rsp + 0x28]
+
+ // begin epilogue
+ free_stack SIZEOF_STACK_FRAME
+ pop_argument_register rdx
+
+ pop_nonvol_reg rbx
+
+ ret
+NESTED_END ProfileTailcallNaked, _TEXT
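For reference, the [rsp + N] offsets hard-coded by the three probes above line up as follows. This is a minimal sketch of the layout (field names taken from the comment block at the top of this file; it is not the runtime's actual declaration, which lives in amd64/profiler.cpp):

    #include <cstddef>
    #include <cstdint>

    struct ProfilePlatformSpecificDataSketch
    {
        void*    functionId;   // [rsp + 0x00]
        void*    rbp;          // [rsp + 0x08]
        void*    probeRsp;     // [rsp + 0x10]
        void*    ip;           // [rsp + 0x18]
        void*    profiledRsp;  // [rsp + 0x20]
        uint64_t rax;          // [rsp + 0x28]
        void*    hiddenArg;    // [rsp + 0x30]
        uint64_t flt[8];       // [rsp + 0x38] .. [rsp + 0x70]
        uint64_t rdi;          // [rsp + 0x78]
        uint64_t rsi;          // [rsp + 0x80]
        uint64_t rdx;          // [rsp + 0x88]
        uint64_t rcx;          // [rsp + 0x90]
        uint64_t r8;           // [rsp + 0x98]
        uint64_t r9;           // [rsp + 0xa0]
        uint32_t flags;        // [rsp + 0xa8]
    };

    // 21 eight-byte slots plus the 4-byte flags field end at 0xac; the
    // extra 0x8 in SIZEOF_PROFILE_PLATFORM_SPECIFIC_DATA (0xb8 total) is
    // the "fudge" that keeps the FP spill area clear of the struct.
    static_assert(offsetof(ProfilePlatformSpecificDataSketch, flags) == 0xa8,
                  "flags offset must match the stores in the probes above");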
diff --git a/src/vm/amd64/cgenamd64.cpp b/src/vm/amd64/cgenamd64.cpp
index 497abcd502..20dca22e36 100644
--- a/src/vm/amd64/cgenamd64.cpp
+++ b/src/vm/amd64/cgenamd64.cpp
@@ -670,6 +670,19 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
_ASSERTE(DbgIsExecutable(&m_movR10[0], &m_jmpRAX[3]-&m_movR10[0]));
}
+void UMEntryThunkCode::Poison()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_movR10[0] = X86_INSTR_INT3;
+}
+
UMEntryThunk* UMEntryThunk::Decode(LPVOID pCallback)
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/amd64/cgencpu.h b/src/vm/amd64/cgencpu.h
index 64a6501dc0..b74e3ca7d3 100644
--- a/src/vm/amd64/cgencpu.h
+++ b/src/vm/amd64/cgencpu.h
@@ -472,6 +472,7 @@ struct DECLSPEC_ALIGN(8) UMEntryThunkCode
BYTE m_padding2[5];
void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Poison();
LPCBYTE GetEntryPoint() const
{
diff --git a/src/vm/amd64/profiler.cpp b/src/vm/amd64/profiler.cpp
index e88cbba9ee..d43df944d6 100644
--- a/src/vm/amd64/profiler.cpp
+++ b/src/vm/amd64/profiler.cpp
@@ -36,6 +36,18 @@ typedef struct _PROFILE_PLATFORM_SPECIFIC_DATA
UINT64 flt1;
UINT64 flt2;
UINT64 flt3;
+#if defined(UNIX_AMD64_ABI)
+ UINT64 flt4;
+ UINT64 flt5;
+ UINT64 flt6;
+ UINT64 flt7;
+ UINT64 rdi;
+ UINT64 rsi;
+ UINT64 rdx;
+ UINT64 rcx;
+ UINT64 r8;
+ UINT64 r9;
+#endif
UINT32 flags;
} PROFILE_PLATFORM_SPECIFIC_DATA, *PPROFILE_PLATFORM_SPECIFIC_DATA;
@@ -157,7 +169,16 @@ ProfileArgIterator::ProfileArgIterator(MetaSig * pSig, void * platformSpecificHa
index++;
}
+#ifdef UNIX_AMD64_ABI
+ switch (index)
+ {
+ case 0: pData->hiddenArg = (LPVOID)pData->rdi; break;
+ case 1: pData->hiddenArg = (LPVOID)pData->rsi; break;
+ case 2: pData->hiddenArg = (LPVOID)pData->rdx; break;
+ }
+#else
pData->hiddenArg = *(LPVOID*)((LPBYTE)pData->profiledRsp + (index * sizeof(SIZE_T)));
+#endif // UNIX_AMD64_ABI
}
}
else
@@ -309,7 +330,11 @@ LPVOID ProfileArgIterator::GetThis(void)
{
if (m_argIterator.HasThis())
{
+#ifdef UNIX_AMD64_ABI
+ return (LPVOID)pData->rdi;
+#else
return *(LPVOID*)((LPBYTE)pData->profiledRsp);
+#endif // UNIX_AMD64_ABI
}
}
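On SysV AMD64 the first six integer/pointer arguments arrive in registers rather than on the stack, which is why the UNIX_AMD64_ABI paths above read the hidden generic-context argument and `this` from the captured register fields instead of dereferencing profiledRsp. A hedged sketch of the index-to-register mapping (helper name hypothetical):

    // Map an integer-argument index to the register image captured by the
    // Profile*Naked probes; indices past 5 would spill to the stack.
    static void* GetIntegerArgRegSketch(const PROFILE_PLATFORM_SPECIFIC_DATA* pData,
                                        int index)
    {
        switch (index)
        {
        case 0: return (void*)pData->rdi;
        case 1: return (void*)pData->rsi;
        case 2: return (void*)pData->rdx;
        case 3: return (void*)pData->rcx;
        case 4: return (void*)pData->r8;
        case 5: return (void*)pData->r9;
        default: return nullptr; // stack-passed: read relative to profiledRsp
        }
    }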
diff --git a/src/vm/amd64/unixstubs.cpp b/src/vm/amd64/unixstubs.cpp
index 76d3cf1890..83764e0a22 100644
--- a/src/vm/amd64/unixstubs.cpp
+++ b/src/vm/amd64/unixstubs.cpp
@@ -11,21 +11,6 @@ extern "C"
PORTABILITY_ASSERT("Implement for PAL");
}
- void ProfileEnterNaked(FunctionIDOrClientID functionIDOrClientID)
- {
- PORTABILITY_ASSERT("Implement for PAL");
- }
-
- void ProfileLeaveNaked(FunctionIDOrClientID functionIDOrClientID)
- {
- PORTABILITY_ASSERT("Implement for PAL");
- }
-
- void ProfileTailcallNaked(FunctionIDOrClientID functionIDOrClientID)
- {
- PORTABILITY_ASSERT("Implement for PAL");
- }
-
DWORD getcpuid(DWORD arg, unsigned char result[16])
{
DWORD eax;
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
index 946009ac06..7b0da7f5a2 100644
--- a/src/vm/appdomain.cpp
+++ b/src/vm/appdomain.cpp
@@ -8,7 +8,6 @@
#include "appdomain.hpp"
#include "peimagelayout.inl"
#include "field.h"
-#include "security.h"
#include "strongnameinternal.h"
#include "excep.h"
#include "eeconfig.h"
@@ -58,7 +57,6 @@
#include "typeequivalencehash.hpp"
#endif
-#include "listlock.inl"
#include "appdomain.inl"
#include "typeparse.h"
#include "mdaassistants.h"
@@ -80,12 +78,10 @@
#include "clrprivtypecachewinrt.h"
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
#pragma warning(push)
#pragma warning(disable:4324)
#include "marvin32.h"
#pragma warning(pop)
-#endif
// this file handles string conversion errors for itself
#undef MAKE_TRANSLATIONFAILED
@@ -711,10 +707,6 @@ OBJECTHANDLE ThreadStaticHandleTable::AllocateHandles(DWORD nRequested)
//*****************************************************************************
void BaseDomain::Attach()
{
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- // Randomized string hashing is on by default for String.GetHashCode in coreclr.
- COMNlsHashProvider::s_NlsHashProvider.SetUseRandomHashing((CorHost2::GetStartupFlags() & STARTUP_DISABLE_RANDOMIZED_STRING_HASHING) == 0);
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
m_SpecialStaticsCrst.Init(CrstSpecialStatics);
}
@@ -758,8 +750,8 @@ BaseDomain::BaseDomain()
m_ClassInitLock.PreInit();
m_ILStubGenLock.PreInit();
-#ifdef FEATURE_REJIT
- m_reJitMgr.PreInit(this == (BaseDomain *) g_pSharedDomainMemory);
+#ifdef FEATURE_CODE_VERSIONING
+ m_codeVersionManager.PreInit(this == (BaseDomain *)g_pSharedDomainMemory);
#endif
} //BaseDomain::BaseDomain
@@ -874,22 +866,23 @@ void BaseDomain::Terminate()
m_DomainLocalBlockCrst.Destroy();
m_InteropDataCrst.Destroy();
+ JitListLockEntry* pJitElement;
ListLockEntry* pElement;
// All the threads that are in this domain had better be stopped by this
// point.
//
// We might be jitting or running a .cctor so we need to empty that queue.
- pElement = m_JITLock.Pop(TRUE);
- while (pElement)
+ pJitElement = m_JITLock.Pop(TRUE);
+ while (pJitElement)
{
#ifdef STRICT_JITLOCK_ENTRY_LEAK_DETECTION
_ASSERTE ((m_JITLock.m_pHead->m_dwRefCount == 1
&& m_JITLock.m_pHead->m_hrResultCode == E_FAIL) ||
dbg_fDrasticShutdown || g_fInControlC);
#endif // STRICT_JITLOCK_ENTRY_LEAK_DETECTION
- delete(pElement);
- pElement = m_JITLock.Pop(TRUE);
+ delete(pJitElement);
+ pJitElement = m_JITLock.Pop(TRUE);
}
m_JITLock.Destroy();
@@ -2715,9 +2708,6 @@ void SystemDomain::LoadBaseSystemClasses()
// Load Object
g_pObjectClass = MscorlibBinder::GetClass(CLASS__OBJECT);
- // get the Object::.ctor method desc so we can special-case it
- g_pObjectCtorMD = MscorlibBinder::GetMethod(METHOD__OBJECT__CTOR);
-
// Now that ObjectClass is loaded, we can set up
// the system for finalizers. There is no point in deferring this, since we need
// to know this before we allocate our first object.
@@ -3741,10 +3731,6 @@ StackWalkAction SystemDomain::CallersMethodCallback(CrawlFrame* pCf, VOID* data)
/* We asked to be called back only for functions */
_ASSERTE(pFunc);
- // Ignore intercepted frames
- if(pFunc->IsInterceptedForDeclSecurity())
- return SWA_CONTINUE;
-
CallersData* pCaller = (CallersData*) data;
if(pCaller->skip == 0) {
pCaller->pMethod = pFunc;
@@ -4280,7 +4266,6 @@ void AppDomain::Init()
#endif //FEATURE_COMINTEROP
#ifdef FEATURE_TIERED_COMPILATION
- m_callCounter.SetTieredCompilationManager(GetTieredCompilationManager());
m_tieredCompilationManager.Init(GetId());
#endif
#endif // CROSSGEN_COMPILE
@@ -5040,7 +5025,7 @@ FileLoadLock::~FileLoadLock()
MODE_ANY;
}
CONTRACTL_END;
- ((PEFile *) m_pData)->Release();
+ ((PEFile *) m_data)->Release();
}
DomainFile *FileLoadLock::GetDomainFile()
@@ -7078,18 +7063,26 @@ EndTry2:;
}
else if (!fIsWellKnown)
{
- // Trigger the resolve event also for non-throw situation.
- // However, this code path will behave as if the resolve handler has thrown,
- // that is, not trigger an MDA.
_ASSERTE(fThrowOnFileNotFound == FALSE);
- AssemblySpec NewSpec(this);
- AssemblySpec *pFailedSpec = NULL;
+ // Don't trigger the resolve event for the CoreLib satellite assembly. A misbehaving resolve event may
+ // return an assembly that does not match, and this can cause recursive resource lookups during error
+ // reporting. The CoreLib satellite assembly is loaded from relative locations based on the culture, see
+ // AssemblySpec::Bind().
+ if (!pSpec->IsMscorlibSatellite())
+ {
+ // Trigger the resolve event also for non-throw situation.
+ // However, this code path will behave as if the resolve handler has thrown,
+ // that is, not trigger an MDA.
+
+ AssemblySpec NewSpec(this);
+ AssemblySpec *pFailedSpec = NULL;
- fForceReThrow = TRUE; // Managed resolve event handler can throw
+ fForceReThrow = TRUE; // Managed resolve event handler can throw
- // Purposly ignore return value
- PostBindResolveAssembly(pSpec, &NewSpec, hrBindResult, &pFailedSpec);
+ // Purposely ignore return value
+ PostBindResolveAssembly(pSpec, &NewSpec, hrBindResult, &pFailedSpec);
+ }
}
}
}
@@ -8104,7 +8097,7 @@ void AppDomain::Exit(BOOL fRunFinalizers, BOOL fAsyncExit)
// have exited the domain.
//
#ifdef FEATURE_TIERED_COMPILATION
- m_tieredCompilationManager.OnAppDomainShutdown();
+ m_tieredCompilationManager.Shutdown(FALSE);
#endif
//
@@ -8145,14 +8138,14 @@ void AppDomain::Exit(BOOL fRunFinalizers, BOOL fAsyncExit)
LOG((LF_APPDOMAIN | LF_CORDB, LL_INFO10, "AppDomain::Domain [%d] %#08x %ls is exited.\n",
GetId().m_dwId, this, GetFriendlyNameForLogging()));
- ReJitManager::OnAppDomainExit(this);
-
// Send ETW events for this domain's unload and potentially iterate through this
// domain's modules & assemblies to send events for their unloads as well. This
// needs to occur before STAGE_FINALIZED (to ensure everything is there), so we do
// this before any finalization occurs at all.
ETW::LoaderLog::DomainUnload(this);
+ CodeVersionManager::OnAppDomainExit(this);
+
//
// Spin running finalizers until we flush them all. We need to make multiple passes
// in case the finalizers create more finalizable objects. This is important to clear
diff --git a/src/vm/appdomain.hpp b/src/vm/appdomain.hpp
index 18bc73e5a5..c5af6e79bc 100644
--- a/src/vm/appdomain.hpp
+++ b/src/vm/appdomain.hpp
@@ -49,6 +49,8 @@
#include "callcounter.h"
#endif
+#include "codeversion.h"
+
class BaseDomain;
class SystemDomain;
class SharedDomain;
@@ -839,7 +841,7 @@ public:
pEntry != NULL;
pEntry = pEntry->m_pNext)
{
- if (((PEFile *)pEntry->m_pData)->Equals(pFile))
+ if (((PEFile *)pEntry->m_data)->Equals(pFile))
{
return pEntry;
}
@@ -949,6 +951,9 @@ typedef FileLoadLock::Holder FileLoadLockHolder;
typedef ReleaseHolder<FileLoadLock> FileLoadLockRefHolder;
#endif // DACCESS_COMPILE
+ typedef ListLockBase<NativeCodeVersion> JitListLock;
+ typedef ListLockEntryBase<NativeCodeVersion> JitListLockEntry;
+
#ifdef _MSC_VER
#pragma warning(push)
@@ -1204,7 +1209,7 @@ public:
return &m_ClassInitLock;
}
- ListLock* GetJitLock()
+ JitListLock* GetJitLock()
{
LIMITED_METHOD_CONTRACT;
return &m_JITLock;
@@ -1398,7 +1403,7 @@ protected:
CrstExplicitInit m_crstAssemblyList;
BOOL m_fDisableInterfaceCache; // RCW COM interface cache
ListLock m_ClassInitLock;
- ListLock m_JITLock;
+ JitListLock m_JITLock;
ListLock m_ILStubGenLock;
// Fusion context, used for adding assemblies to this domain. It defines
@@ -1547,12 +1552,21 @@ public:
return m_dwSizedRefHandles;
}
- // Profiler rejit
+#ifdef FEATURE_CODE_VERSIONING
+private:
+ CodeVersionManager m_codeVersionManager;
+
+public:
+ CodeVersionManager* GetCodeVersionManager() { return &m_codeVersionManager; }
+#endif //FEATURE_CODE_VERSIONING
+
+#ifdef FEATURE_TIERED_COMPILATION
private:
- ReJitManager m_reJitMgr;
+ CallCounter m_callCounter;
public:
- ReJitManager * GetReJitManager() { return &m_reJitMgr; }
+ CallCounter* GetCallCounter() { return &m_callCounter; }
+#endif
#ifdef DACCESS_COMPILE
public:
@@ -3823,15 +3837,6 @@ public:
private:
TieredCompilationManager m_tieredCompilationManager;
-public:
- CallCounter * GetCallCounter()
- {
- LIMITED_METHOD_CONTRACT;
- return &m_callCounter;
- }
-
-private:
- CallCounter m_callCounter;
#endif
#ifdef FEATURE_COMINTEROP
diff --git a/src/vm/appdomainnative.cpp b/src/vm/appdomainnative.cpp
index 41259897c0..0ee950c85b 100644
--- a/src/vm/appdomainnative.cpp
+++ b/src/vm/appdomainnative.cpp
@@ -7,7 +7,6 @@
#include "common.h"
#include "appdomain.hpp"
#include "appdomainnative.hpp"
-#include "security.h"
#include "vars.hpp"
#include "eeconfig.h"
#include "appdomain.inl"
diff --git a/src/vm/arm/armsinglestepper.cpp b/src/vm/arm/armsinglestepper.cpp
index e000959ef9..bfe88244f8 100644
--- a/src/vm/arm/armsinglestepper.cpp
+++ b/src/vm/arm/armsinglestepper.cpp
@@ -97,17 +97,25 @@ ArmSingleStepper::ArmSingleStepper()
ArmSingleStepper::~ArmSingleStepper()
{
-#if !defined(DACCESS_COMPILE) && !defined(FEATURE_PAL)
+#if !defined(DACCESS_COMPILE)
+#ifdef FEATURE_PAL
+ SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->BackoutMem(m_rgCode, kMaxCodeBuffer * sizeof(WORD));
+#else
DeleteExecutable(m_rgCode);
#endif
+#endif
}
void ArmSingleStepper::Init()
{
-#if !defined(DACCESS_COMPILE) && !defined(FEATURE_PAL)
+#if !defined(DACCESS_COMPILE)
if (m_rgCode == NULL)
{
+#ifdef FEATURE_PAL
+ m_rgCode = (WORD *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(kMaxCodeBuffer * sizeof(WORD)));
+#else
m_rgCode = new (executable) WORD[kMaxCodeBuffer];
+#endif
}
#endif
}
diff --git a/src/vm/arm/cgencpu.h b/src/vm/arm/cgencpu.h
index 181d5f10eb..2a369d8f02 100644
--- a/src/vm/arm/cgencpu.h
+++ b/src/vm/arm/cgencpu.h
@@ -566,7 +566,11 @@ public:
// a reasonable breakpoint substitute (it's what DebugBreak uses). Bkpt #0, on the other hand, always
// seems to flow directly to the kernel debugger (even if we ignore it there it doesn't seem to be
// picked up by the user mode debugger).
+#ifdef __linux__
+ Emit16(0xde01);
+#else
Emit16(0xdefe);
+#endif
}
void ThumbEmitMovConstant(ThumbReg dest, int constant)
@@ -988,6 +992,7 @@ struct DECLSPEC_ALIGN(4) UMEntryThunkCode
TADDR m_pvSecretParam;
void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Poison();
LPCBYTE GetEntryPoint() const
{
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
index 2e8bb19d49..4bc1b2c1ea 100644
--- a/src/vm/arm/stubs.cpp
+++ b/src/vm/arm/stubs.cpp
@@ -19,7 +19,6 @@
#include "eeconfig.h"
#include "cgensys.h"
#include "asmconstants.h"
-#include "security.h"
#include "virtualcallstub.h"
#include "gcdump.h"
#include "rtlfunctions.h"
@@ -2522,6 +2521,12 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
FlushInstructionCache(GetCurrentProcess(),&m_code,sizeof(m_code));
}
+void UMEntryThunkCode::Poison()
+{
+ // Insert 'udf 0xff' at the entry point
+ m_code[0] = 0xdeff;
+}
+
///////////////////////////// UNIMPLEMENTED //////////////////////////////////
#ifndef DACCESS_COMPILE
@@ -3599,10 +3604,8 @@ PCODE DynamicHelpers::CreateHelper(LoaderAllocator * pAllocator, TADDR arg, PCOD
END_DYNAMIC_HELPER_EMIT();
}
-PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+void DynamicHelpers::EmitHelperWithArg(BYTE*& p, LoaderAllocator * pAllocator, TADDR arg, PCODE target)
{
- BEGIN_DYNAMIC_HELPER_EMIT(18);
-
// mov r1, arg
MovRegImm(p, 1, arg);
p += 8;
@@ -3614,6 +3617,13 @@ PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR ar
// bx r12
*(WORD *)p = 0x4760;
p += 2;
+}
+
+PCODE DynamicHelpers::CreateHelperWithArg(LoaderAllocator * pAllocator, TADDR arg, PCODE target)
+{
+ BEGIN_DYNAMIC_HELPER_EMIT(18);
+
+ EmitHelperWithArg(p, pAllocator, arg, target);
END_DYNAMIC_HELPER_EMIT();
}
@@ -3762,8 +3772,26 @@ PCODE DynamicHelpers::CreateDictionaryLookupHelper(LoaderAllocator * pAllocator,
{
STANDARD_VM_CONTRACT;
- // TODO (NYI)
- ThrowHR(E_NOTIMPL);
+ PCODE helperAddress = (pLookup->helper == CORINFO_HELP_RUNTIMEHANDLE_METHOD ?
+ GetEEFuncEntryPoint(JIT_GenericHandleMethodWithSlotAndModule) :
+ GetEEFuncEntryPoint(JIT_GenericHandleClassWithSlotAndModule));
+
+ GenericHandleArgs * pArgs = (GenericHandleArgs *)(void *)pAllocator->GetDynamicHelpersHeap()->AllocAlignedMem(sizeof(GenericHandleArgs), DYNAMIC_HELPER_ALIGNMENT);
+ pArgs->dictionaryIndexAndSlot = dictionaryIndexAndSlot;
+ pArgs->signature = pLookup->signature;
+ pArgs->module = (CORINFO_MODULE_HANDLE)pModule;
+
+ // It's available only via the run-time helper function,
+ // since optimization cases are not yet implemented.
+ assert(pLookup->indirections == CORINFO_USEHELPER);
+
+ BEGIN_DYNAMIC_HELPER_EMIT(18);
+
+ EmitHelperWithArg(p, pAllocator, (TADDR)pArgs, helperAddress);
+
+ END_DYNAMIC_HELPER_EMIT();
+
+ // @TODO : Additional implementation is required for optimization cases.
}
#endif // FEATURE_READYTORUN
diff --git a/src/vm/arm64/asmconstants.h b/src/vm/arm64/asmconstants.h
index 12b72f9249..dca845d000 100644
--- a/src/vm/arm64/asmconstants.h
+++ b/src/vm/arm64/asmconstants.h
@@ -23,6 +23,13 @@
#define ASMCONSTANTS_RUNTIME_ASSERT(cond)
#endif
+// Some constants are different in _DEBUG builds. This macro factors out the ifdefs from below.
+#ifdef _DEBUG
+#define DBG_FRE(dbg,fre) dbg
+#else
+#define DBG_FRE(dbg,fre) fre
+#endif
+
#define DynamicHelperFrameFlags_Default 0
#define DynamicHelperFrameFlags_ObjectArg 1
#define DynamicHelperFrameFlags_ObjectArg2 2
@@ -75,6 +82,11 @@ ASMCONSTANTS_C_ASSERT( CORINFO_NullReferenceException_ASM
== CORINFO_NullReferenceException);
+#define CORINFO_IndexOutOfRangeException_ASM 3
+ASMCONSTANTS_C_ASSERT( CORINFO_IndexOutOfRangeException_ASM
+ == CORINFO_IndexOutOfRangeException);
+
+
// Offset of the array containing the address of captured registers in MachState
#define MachState__captureX19_X29 0x0
ASMCONSTANTS_C_ASSERT(MachState__captureX19_X29 == offsetof(MachState, captureX19_X29))
@@ -114,6 +126,28 @@ ASMCONSTANTS_C_ASSERT(SIZEOF__Frame == sizeof(Frame));
ASMCONSTANTS_C_ASSERT(SIZEOF__CONTEXT == sizeof(T_CONTEXT));
+//=========================================
+#define MethodTable__m_dwFlags 0x0
+ASMCONSTANTS_C_ASSERT(MethodTable__m_dwFlags == offsetof(MethodTable, m_dwFlags));
+
+#define MethodTable__m_BaseSize 0x04
+ASMCONSTANTS_C_ASSERT(MethodTable__m_BaseSize == offsetof(MethodTable, m_BaseSize));
+
+#define MethodTable__m_ElementType DBG_FRE(0x38, 0x30)
+ASMCONSTANTS_C_ASSERT(MethodTable__m_ElementType == offsetof(MethodTable, m_pMultipurposeSlot1));
+
+#define ArrayBase__m_NumComponents 0x8
+ASMCONSTANTS_C_ASSERT(ArrayBase__m_NumComponents == offsetof(ArrayBase, m_NumComponents));
+
+#define PtrArray__m_Array 0x10
+ASMCONSTANTS_C_ASSERT(PtrArray__m_Array == offsetof(PtrArray, m_Array));
+
+#define TypeHandle_CanCast 0x1 // TypeHandle::CanCast
+
+//=========================================
+
+
+
#ifdef FEATURE_COMINTEROP
#define SIZEOF__ComMethodFrame 0x70
diff --git a/src/vm/arm64/asmhelpers.S b/src/vm/arm64/asmhelpers.S
index 2e1d0299ed..8179388c34 100644
--- a/src/vm/arm64/asmhelpers.S
+++ b/src/vm/arm64/asmhelpers.S
@@ -207,6 +207,106 @@ LEAF_END ThePreStubPatch, _TEXT
LEAF_END_MARKED \name, _TEXT
.endmacro
+// ------------------------------------------------------------------
+// Start of the writeable code region
+LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
+ ret lr
+LEAF_END JIT_PatchedCodeStart, _TEXT
+
+// void JIT_UpdateWriteBarrierState(bool skipEphemeralCheck)
+//
+// Update shadow copies of the various state info required for barrier
+//
+// State info is contained in a literal pool at the end of the function
+// Placed in text section so that it is close enough to use ldr literal and still
+// be relocatable. Eliminates need for PREPARE_EXTERNAL_VAR in hot code.
+//
+// Align and group state info together so it fits in a single cache line
+// and each entry can be written atomically
+//
+WRITE_BARRIER_ENTRY JIT_UpdateWriteBarrierState
+ PROLOG_SAVE_REG_PAIR_INDEXED fp, lr, -16
+
+ // x0-x7 will contain intended new state
+ // x8 will preserve skipEphemeralCheck
+ // x12 will be used for pointers
+
+ mov x8, x0
+
+ PREPARE_EXTERNAL_VAR g_card_table, x12
+ ldr x0, [x12]
+
+#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
+ PREPARE_EXTERNAL_VAR g_card_bundle_table, x12
+ ldr x1, [x12]
+#endif
+
+#ifdef WRITE_BARRIER_CHECK
+ PREPARE_EXTERNAL_VAR g_GCShadow, x12
+ ldr x2, [x12]
+#endif
+
+#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
+ PREPARE_EXTERNAL_VAR g_sw_ww_table, x12
+ ldr x3, [x12]
+#endif
+
+ PREPARE_EXTERNAL_VAR g_ephemeral_low, x12
+ ldr x4, [x12]
+
+ PREPARE_EXTERNAL_VAR g_ephemeral_high, x12
+ ldr x5, [x12]
+
+ cbz x8, LOCAL_LABEL(EphemeralCheckEnabled)
+ movz x4, #0
+ movn x5, #0
+LOCAL_LABEL(EphemeralCheckEnabled):
+
+ PREPARE_EXTERNAL_VAR g_lowest_address, x12
+ ldr x6, [x12]
+
+ PREPARE_EXTERNAL_VAR g_highest_address, x12
+ ldr x7, [x12]
+
+ // Update wbs state
+ adr x12, LOCAL_LABEL(wbs_begin)
+
+ stp x0, x1, [x12], 16
+ stp x2, x3, [x12], 16
+ stp x4, x5, [x12], 16
+ stp x6, x7, [x12], 16
+
+ EPILOG_RESTORE_REG_PAIR_INDEXED fp, lr, 16
+ EPILOG_RETURN
+
+ // Begin patchable literal pool
+ .balign 64 // Align to power of two at least as big as patchable literal pool so that it fits optimally in cache line
+LOCAL_LABEL(wbs_begin):
+LOCAL_LABEL(wbs_card_table):
+ .quad 0
+LOCAL_LABEL(wbs_card_bundle_table):
+ .quad 0
+LOCAL_LABEL(wbs_GCShadow):
+ .quad 0
+LOCAL_LABEL(wbs_sw_ww_table):
+ .quad 0
+LOCAL_LABEL(wbs_ephemeral_low):
+ .quad 0
+LOCAL_LABEL(wbs_ephemeral_high):
+ .quad 0
+LOCAL_LABEL(wbs_lowest_address):
+ .quad 0
+LOCAL_LABEL(wbs_highest_address):
+ .quad 0
+WRITE_BARRIER_END JIT_UpdateWriteBarrierState
+
+
+// ------------------------------------------------------------------
+// End of the writeable code region
+LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
+ ret lr
+LEAF_END JIT_PatchedCodeLast, _TEXT
+
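Conceptually, JIT_UpdateWriteBarrierState snapshots the GC globals into a single cache-line-sized block that lives inside the patched code region, so the barriers can read every value with a PC-relative ldr-literal instead of going through PREPARE_EXTERNAL_VAR. A hypothetical C++ mirror of the wbs_* pool (the real pool is just the eight .quad slots above):

    #include <cstdint>

    struct alignas(64) WriteBarrierStateSketch
    {
        uintptr_t card_table;        // wbs_card_table
        uintptr_t card_bundle_table; // wbs_card_bundle_table
        uintptr_t gc_shadow;         // wbs_GCShadow
        uintptr_t sw_ww_table;       // wbs_sw_ww_table
        uintptr_t ephemeral_low;     // wbs_ephemeral_low
        uintptr_t ephemeral_high;    // wbs_ephemeral_high
        uintptr_t lowest_address;    // wbs_lowest_address
        uintptr_t highest_address;   // wbs_highest_address
    };
    static_assert(sizeof(WriteBarrierStateSketch) == 64,
                  "eight quads fill exactly one 64-byte cache line");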
// void JIT_ByRefWriteBarrier
// On entry:
// x13 : the source address (points to object reference to write)
@@ -235,19 +335,16 @@ WRITE_BARRIER_END JIT_ByRefWriteBarrier
//
// On exit:
// x12 : trashed
-// x14 : incremented by 8
+// x14 : trashed (incremented by 8 to implement JIT_ByRefWriteBarrier contract)
// x15 : trashed
// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
//
WRITE_BARRIER_ENTRY JIT_CheckedWriteBarrier
- PREPARE_EXTERNAL_VAR g_lowest_address, x12
- ldr x12, [x12]
+ ldr x12, LOCAL_LABEL(wbs_lowest_address)
cmp x14, x12
- blt LOCAL_LABEL(NotInHeap)
- PREPARE_EXTERNAL_VAR g_highest_address, x12
- ldr x12, [x12]
- cmp x14, x12
+ ldr x12, LOCAL_LABEL(wbs_highest_address)
+ ccmp x14, x12, #0x0, ge
blt C_FUNC(JIT_WriteBarrier)
LOCAL_LABEL(NotInHeap):
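The cmp/ccmp pair above folds the old two-branch range check into a single conditional branch; the equivalent logic as a C++ sketch (dst corresponds to x14):

    #include <cstdint>

    // ccmp forces NZCV to 0 when the first comparison fails (dst < lowest),
    // so the single trailing blt both rejects "below range" and tests
    // "above range".
    static bool InGCHeapSketch(uintptr_t dst, uintptr_t lowest, uintptr_t highest)
    {
        return dst >= lowest && dst < highest; // taken => JIT_WriteBarrier
    }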
@@ -262,7 +359,7 @@ WRITE_BARRIER_END JIT_CheckedWriteBarrier
//
// On exit:
// x12 : trashed
-// x14 : incremented by 8
+// x14 : trashed (incremented by 8 to implement JIT_ByRefWriteBarrier contract)
// x15 : trashed
// x17 : trashed (ip1) if FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
//
@@ -272,23 +369,24 @@ WRITE_BARRIER_ENTRY JIT_WriteBarrier
#ifdef WRITE_BARRIER_CHECK
// Update GC Shadow Heap
- // need temporary registers. Save them before using.
- stp x12, x13, [sp, #-16]!
+ // Do not perform the work if g_GCShadow is 0
+ ldr x12, LOCAL_LABEL(wbs_GCShadow)
+ cbz x12, LOCAL_LABEL(ShadowUpdateDisabled)
+
+ // need temporary register. Save before using.
+ str x13, [sp, #-16]!
// Compute address of shadow heap location:
// pShadow = g_GCShadow + (x14 - g_lowest_address)
- PREPARE_EXTERNAL_VAR g_lowest_address, x12
- ldr x12, [x12]
- sub x12, x14, x12
- PREPARE_EXTERNAL_VAR g_GCShadow, x13
- ldr x13, [x13]
+ ldr x13, LOCAL_LABEL(wbs_lowest_address)
+ sub x13, x14, x13
add x12, x13, x12
// if (pShadow >= g_GCShadowEnd) goto end
PREPARE_EXTERNAL_VAR g_GCShadowEnd, x13
ldr x13, [x13]
cmp x12, x13
- bhs LOCAL_LABEL(shadowupdateend)
+ bhs LOCAL_LABEL(ShadowUpdateEnd)
// *pShadow = x15
str x15, [x12]
@@ -300,25 +398,22 @@ WRITE_BARRIER_ENTRY JIT_WriteBarrier
// if ([x14] == x15) goto end
ldr x13, [x14]
cmp x13, x15
- beq LOCAL_LABEL(shadowupdateend)
+ beq LOCAL_LABEL(ShadowUpdateEnd)
// *pShadow = INVALIDGCVALUE (0xcccccccd)
- mov x13, #0
- movk x13, #0xcccd
+ movz x13, #0xcccd
movk x13, #0xcccc, LSL #16
str x13, [x12]
-LOCAL_LABEL(shadowupdateend):
- ldp x12, x13, [sp],#16
+LOCAL_LABEL(ShadowUpdateEnd):
+ ldr x13, [sp], #16
+LOCAL_LABEL(ShadowUpdateDisabled):
#endif
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
// Update the write watch table if necessary
- PREPARE_EXTERNAL_VAR g_sw_ww_enabled_for_gc_heap, x12
- ldrb w12, [x12]
+ ldr x12, LOCAL_LABEL(wbs_sw_ww_table)
cbz x12, LOCAL_LABEL(CheckCardTable)
- PREPARE_EXTERNAL_VAR g_sw_ww_table, x12
- ldr x12, [x12]
add x12, x12, x14, lsr #0xc // SoftwareWriteWatch::AddressToTableByteIndexShift
ldrb w17, [x12]
cbnz x17, LOCAL_LABEL(CheckCardTable)
@@ -329,20 +424,18 @@ LOCAL_LABEL(shadowupdateend):
LOCAL_LABEL(CheckCardTable):
// Branch to Exit if the reference is not in the Gen0 heap
//
- PREPARE_EXTERNAL_VAR g_ephemeral_low, x12
- ldr x12, [x12]
+ ldr x12, LOCAL_LABEL(wbs_ephemeral_low)
+ cbz x12, LOCAL_LABEL(SkipEphemeralCheck)
cmp x15, x12
- blt LOCAL_LABEL(Exit)
- PREPARE_EXTERNAL_VAR g_ephemeral_high, x12
- ldr x12, [x12]
- cmp x15, x12
+ ldr x12, LOCAL_LABEL(wbs_ephemeral_high)
+ ccmp x15, x12, 0x0, ge
bgt LOCAL_LABEL(Exit)
+LOCAL_LABEL(SkipEphemeralCheck):
// Check if we need to update the card table
- PREPARE_EXTERNAL_VAR g_card_table, x12
- ldr x12, [x12]
- add x15, x12, x14, lsr #11
+ ldr x12, LOCAL_LABEL(wbs_card_table)
+ add x15, x12, x14, lsr #11
ldrb w12, [x15]
cmp x12, 0xFF
beq LOCAL_LABEL(Exit)
@@ -352,10 +445,9 @@ LOCAL_LABEL(UpdateCardTable):
strb w12, [x15]
#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
- // Check if we need to update the card table
- PREPARE_EXTERNAL_VAR g_card_bundle_table, x12
- ldr x12, [x12]
- add x15, x12, x14, lsr #21
+ // Check if we need to update the card bundle table
+ ldr x12, LOCAL_LABEL(wbs_card_bundle_table)
+ add x15, x12, x14, lsr #21
ldrb w12, [x15]
cmp x12, 0xFF
beq LOCAL_LABEL(Exit)
@@ -370,18 +462,6 @@ LOCAL_LABEL(Exit):
ret lr
WRITE_BARRIER_END JIT_WriteBarrier
-// ------------------------------------------------------------------
-// Start of the writeable code region
-LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
- ret lr
-LEAF_END JIT_PatchedCodeStart, _TEXT
-
-// ------------------------------------------------------------------
-// End of the writeable code region
-LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
- ret lr
-LEAF_END JIT_PatchedCodeLast, _TEXT
-
//------------------------------------------------
// VirtualMethodFixupStub
//
@@ -1112,6 +1192,7 @@ LOCAL_LABEL(Promote):
mov x16, #256
str x16, [x13] // be quick to reset the counter so we don't get a bunch of contending threads
orr x11, x11, #PROMOTE_CHAIN_FLAG // set PROMOTE_CHAIN_FLAG
+ mov x12, x9 // We pass the ResolveCacheElem to ResolveWorkerAsmStub instead of the DispatchToken
LOCAL_LABEL(Fail):
b ResolveWorkerAsmStub // call the ResolveWorkerAsmStub method to transition into the VM
@@ -1301,3 +1382,100 @@ LEAF_ENTRY JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
ldr x0, [x0, #DomainLocalModule__m_pGCStatics]
ret lr
LEAF_END JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain, _TEXT
+
+// ------------------------------------------------------------------
+// __declspec(naked) void F_CALL_CONV JIT_Stelem_Ref(PtrArray* array, unsigned idx, Object* val)
+LEAF_ENTRY JIT_Stelem_Ref, _TEXT
+ // We retain arguments as they were passed and use x0 == array, x1 == idx, x2 == val
+
+ // check for null array
+ cbz x0, LOCAL_LABEL(ThrowNullReferenceException)
+
+ // idx bounds check
+ ldr x3,[x0,#ArrayBase__m_NumComponents]
+ cmp x3, x1
+ bls LOCAL_LABEL(ThrowIndexOutOfRangeException)
+
+ // fast path to null assignment (doesn't need any write-barriers)
+ cbz x2, LOCAL_LABEL(AssigningNull)
+
+ // Verify the array-type and val-type matches before writing
+ ldr x12, [x0] // x12 = array MT
+ ldr x3, [x2] // x3 = val->GetMethodTable()
+ ldr x12, [x12, #MethodTable__m_ElementType] // array->GetArrayElementTypeHandle()
+ cmp x3, x12
+ beq C_FUNC(JIT_Stelem_DoWrite)
+
+ // Types didn't match, but writing into an array of objects is still allowed
+ ldr x3, =g_pObjectClass
+ ldr x3, [x3] // x3 = *g_pObjectClass
+ cmp x3, x12 // array type matches with Object*
+ beq C_FUNC(JIT_Stelem_DoWrite)
+
+ // array type and val type do not exactly match. Raise frame and do detailed match
+ b C_FUNC(JIT_Stelem_Ref_NotExactMatch)
+
+LOCAL_LABEL(AssigningNull):
+ // Assigning null doesn't need write barrier
+ add x0, x0, x1, LSL #3 // x0 = x0 + (x1 x 8) = array->m_array[idx]
+ str x2, [x0, #PtrArray__m_Array] // array->m_array[idx] = val
+ ret
+
+LOCAL_LABEL(ThrowNullReferenceException):
+ // Tail call JIT_InternalThrow(NullReferenceException)
+ ldr x0, =CORINFO_NullReferenceException_ASM
+ b C_FUNC(JIT_InternalThrow)
+
+LOCAL_LABEL(ThrowIndexOutOfRangeException):
+ // Tail call JIT_InternalThrow(IndexOutOfRangeException)
+ ldr x0, =CORINFO_IndexOutOfRangeException_ASM
+ b C_FUNC(JIT_InternalThrow)
+
+LEAF_END JIT_Stelem_Ref, _TEXT
+
+// ------------------------------------------------------------------
+// __declspec(naked) void F_CALL_CONV JIT_Stelem_Ref_NotExactMatch(PtrArray* array,
+// unsigned idx, Object* val)
+// x12 = array->GetArrayElementTypeHandle()
+//
+NESTED_ENTRY JIT_Stelem_Ref_NotExactMatch, _TEXT, NoHandler
+ PROLOG_SAVE_REG_PAIR_INDEXED fp, lr, -48
+ // Spill callee saved registers
+ PROLOG_SAVE_REG_PAIR x0, x1, 16
+ PROLOG_SAVE_REG x2, 32
+
+ // allow in case val can be casted to array element type
+ // call ObjIsInstanceOfNoGC(val, array->GetArrayElementTypeHandle())
+ mov x1, x12 // array->GetArrayElementTypeHandle()
+ mov x0, x2
+ bl C_FUNC(ObjIsInstanceOfNoGC)
+ cmp x0, TypeHandle_CanCast
+ beq LOCAL_LABEL(DoWrite) // ObjIsInstance returned TypeHandle::CanCast
+
+ // check via raising frame
+LOCAL_LABEL(NeedFrame):
+ add x1, sp, #16 // x1 = &array
+ add x0, sp, #32 // x0 = &val
+
+ bl C_FUNC(ArrayStoreCheck) // ArrayStoreCheck(&val, &array)
+
+LOCAL_LABEL(DoWrite):
+ EPILOG_RESTORE_REG_PAIR x0, x1, 16
+ EPILOG_RESTORE_REG x2, 32
+ EPILOG_RESTORE_REG_PAIR_INDEXED fp, lr, 48
+ b C_FUNC(JIT_Stelem_DoWrite)
+NESTED_END JIT_Stelem_Ref_NotExactMatch, _TEXT
+
+// ------------------------------------------------------------------
+// __declspec(naked) void F_CALL_CONV JIT_Stelem_DoWrite(PtrArray* array, unsigned idx, Object* val)
+LEAF_ENTRY JIT_Stelem_DoWrite, _TEXT
+
+ // Setup args for JIT_WriteBarrier. x14 = &array->m_array[idx] x15 = val
+ add x14, x0, #PtrArray__m_Array // x14 = &array->m_array
+ add x14, x14, x1, LSL #3
+ mov x15, x2 // x15 = val
+
+ // Branch to the write barrier (which is already correctly overwritten with
+ // single- or multi-proc code based on the current CPU)
+ b C_FUNC(JIT_WriteBarrier)
+LEAF_END JIT_Stelem_DoWrite, _TEXT
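Taken together, the three entry points above implement the stelem.ref checks: null array, bounds, exact or trivially compatible element type on the fast path, a full cast check on the slow path, and finally a write-barriered store. A rough self-contained sketch of that control flow (simplified stand-in types, not the runtime's MethodTable/PtrArray):

    #include <cstddef>
    #include <stdexcept>

    struct MethodTableSketch { MethodTableSketch* elementType; };
    struct ObjectSketch      { MethodTableSketch* mt; };
    struct PtrArraySketch
    {
        MethodTableSketch* mt;            // array method table
        size_t             numComponents; // ArrayBase__m_NumComponents
        ObjectSketch*      elements[1];   // PtrArray__m_Array
    };

    MethodTableSketch* g_objectClassSketch;                // g_pObjectClass stand-in
    bool CanCastSketch(ObjectSketch*, MethodTableSketch*); // cast-check stand-in

    void StelemRefSketch(PtrArraySketch* arr, size_t idx, ObjectSketch* val)
    {
        if (arr == nullptr)            throw std::runtime_error("NullReference");
        if (idx >= arr->numComponents) throw std::out_of_range("IndexOutOfRange");
        if (val == nullptr) { arr->elements[idx] = nullptr; return; } // no barrier

        MethodTableSketch* elemMT = arr->mt->elementType;
        if (val->mt == elemMT || elemMT == g_objectClassSketch ||
            CanCastSketch(val, elemMT))   // slow path (JIT_Stelem_Ref_NotExactMatch)
        {
            arr->elements[idx] = val;     // real code routes through JIT_WriteBarrier
            return;
        }
        throw std::runtime_error("ArrayTypeMismatch"); // via ArrayStoreCheck
    }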
diff --git a/src/vm/arm64/asmhelpers.asm b/src/vm/arm64/asmhelpers.asm
index 8da2151459..2605a67f73 100644
--- a/src/vm/arm64/asmhelpers.asm
+++ b/src/vm/arm64/asmhelpers.asm
@@ -36,6 +36,11 @@
IMPORT DynamicHelperWorker
#endif
+ IMPORT ObjIsInstanceOfNoGC
+ IMPORT ArrayStoreCheck
+ SETALIAS g_pObjectClass, ?g_pObjectClass@@3PEAVMethodTable@@EA
+ IMPORT $g_pObjectClass
+
IMPORT g_ephemeral_low
IMPORT g_ephemeral_high
IMPORT g_lowest_address
@@ -1198,8 +1203,9 @@ Success
Promote
; Move this entry to head position of the chain
mov x16, #256
- str x16, [x13] ; be quick to reset the counter so we don't get a bunch of contending threads
+ str x16, [x13] ; be quick to reset the counter so we don't get a bunch of contending threads
orr x11, x11, #PROMOTE_CHAIN_FLAG ; set PROMOTE_CHAIN_FLAG
+ mov x12, x9 ; We pass the ResolveCacheElem to ResolveWorkerAsmStub instead of the DispatchToken
Fail
b ResolveWorkerAsmStub ; call the ResolveWorkerAsmStub method to transition into the VM
@@ -1385,5 +1391,101 @@ CallHelper2
ret lr
LEAF_END
+; ------------------------------------------------------------------
+;__declspec(naked) void F_CALL_CONV JIT_Stelem_Ref(PtrArray* array, unsigned idx, Object* val)
+ LEAF_ENTRY JIT_Stelem_Ref
+ ; We retain arguments as they were passed and use x0 == array, x1 == idx, x2 == val
+
+ ; check for null array
+ cbz x0, ThrowNullReferenceException
+
+ ; idx bounds check
+ ldr x3,[x0,#ArrayBase__m_NumComponents]
+ cmp x3, x1
+ bls ThrowIndexOutOfRangeException
+
+ ; fast path to null assignment (doesn't need any write-barriers)
+ cbz x2, AssigningNull
+
+ ; Verify the array-type and val-type matches before writing
+ ldr x12, [x0] ; x12 = array MT
+ ldr x3, [x2] ; x3 = val->GetMethodTable()
+ ldr x12, [x12, #MethodTable__m_ElementType] ; array->GetArrayElementTypeHandle()
+ cmp x3, x12
+ beq JIT_Stelem_DoWrite
+
+ ; Types didn't match, but writing into an array of objects is still allowed
+ ldr x3, =$g_pObjectClass
+ ldr x3, [x3] ; x3 = *g_pObjectClass
+ cmp x3, x12 ; array type matches with Object*
+ beq JIT_Stelem_DoWrite
+
+ ; array type and val type do not exactly match. Raise frame and do detailed match
+ b JIT_Stelem_Ref_NotExactMatch
+
+AssigningNull
+ ; Assigning null doesn't need write barrier
+ add x0, x0, x1, LSL #3 ; x0 = x0 + (x1 x 8) = array->m_array[idx]
+ str x2, [x0, #PtrArray__m_Array] ; array->m_array[idx] = val
+ ret
+
+ThrowNullReferenceException
+ ; Tail call JIT_InternalThrow(NullReferenceException)
+ ldr x0, =CORINFO_NullReferenceException_ASM
+ b JIT_InternalThrow
+
+ThrowIndexOutOfRangeException
+ ; Tail call JIT_InternalThrow(IndexOutOfRangeException)
+ ldr x0, =CORINFO_IndexOutOfRangeException_ASM
+ b JIT_InternalThrow
+
+ LEAF_END
+
+; ------------------------------------------------------------------
+; __declspec(naked) void F_CALL_CONV JIT_Stelem_Ref_NotExactMatch(PtrArray* array,
+; unsigned idx, Object* val)
+; x12 = array->GetArrayElementTypeHandle()
+;
+ NESTED_ENTRY JIT_Stelem_Ref_NotExactMatch
+ PROLOG_SAVE_REG_PAIR fp, lr, #-0x48!
+ stp x0, x1, [sp, #16]
+ str x2, [sp, #32]
+
+ ; allow the store if val can be cast to the array element type
+ ; call ObjIsInstanceOfNoGC(val, array->GetArrayElementTypeHandle())
+ mov x1, x12 ; array->GetArrayElementTypeHandle()
+ mov x0, x2
+ bl ObjIsInstanceOfNoGC
+ cmp x0, TypeHandle_CanCast
+ beq DoWrite ; ObjIsInstance returned TypeHandle::CanCast
+
+ ; check via raising frame
+NeedFrame
+ add x1, sp, #16 ; x1 = &array
+ add x0, sp, #32 ; x0 = &val
+
+ bl ArrayStoreCheck ; ArrayStoreCheck(&val, &array)
+
+DoWrite
+ ldp x0, x1, [sp, #16]
+ ldr x2, [sp, #32]
+ EPILOG_RESTORE_REG_PAIR fp, lr, #0x48!
+ EPILOG_BRANCH JIT_Stelem_DoWrite
+ NESTED_END
+
+; ------------------------------------------------------------------
+; __declspec(naked) void F_CALL_CONV JIT_Stelem_DoWrite(PtrArray* array, unsigned idx, Object* val)
+ LEAF_ENTRY JIT_Stelem_DoWrite
+
+ ; Setup args for JIT_WriteBarrier. x14 = &array->m_array[idx] x15 = val
+ add x14, x0, #PtrArray__m_Array ; x14 = &array->m_array
+ add x14, x14, x1, LSL #3
+ mov x15, x2 ; x15 = val
+
+ ; Branch to the write barrier (which is already correctly overwritten with
+ ; single- or multi-proc code based on the current CPU)
+ b JIT_WriteBarrier
+ LEAF_END
+
; Must be at very end of file
END
diff --git a/src/vm/arm64/cgencpu.h b/src/vm/arm64/cgencpu.h
index d8bbcf7d1d..90e871a16d 100644
--- a/src/vm/arm64/cgencpu.h
+++ b/src/vm/arm64/cgencpu.h
@@ -79,6 +79,16 @@ typedef INT64 StackElemType;
// !! This expression assumes STACK_ELEM_SIZE is a power of 2.
#define StackElemSize(parmSize) (((parmSize) + STACK_ELEM_SIZE - 1) & ~((ULONG)(STACK_ELEM_SIZE - 1)))
+#ifdef FEATURE_PAL // TODO-ARM64-WINDOWS Add JIT_Stelem_Ref support
+//
+// JIT HELPERS.
+//
+// Create alias for optimized implementations of helpers provided on this platform
+//
+// optimized static helpers
+#define JIT_Stelem_Ref JIT_Stelem_Ref
+#endif
+
//**********************************************************************
// Frames
//**********************************************************************
@@ -481,6 +491,7 @@ struct DECLSPEC_ALIGN(16) UMEntryThunkCode
TADDR m_pvSecretParam;
void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Poison();
LPCBYTE GetEntryPoint() const
{
diff --git a/src/vm/arm64/stubs.cpp b/src/vm/arm64/stubs.cpp
index 40d274959f..7e7c2e8088 100644
--- a/src/vm/arm64/stubs.cpp
+++ b/src/vm/arm64/stubs.cpp
@@ -14,11 +14,14 @@
#include "asmconstants.h"
#include "virtualcallstub.h"
#include "jitinterface.h"
+#include "ecall.h"
EXTERN_C void JIT_GetSharedNonGCStaticBase_SingleAppDomain();
EXTERN_C void JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain();
EXTERN_C void JIT_GetSharedGCStaticBase_SingleAppDomain();
EXTERN_C void JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain();
+EXTERN_C void JIT_UpdateWriteBarrierState(bool skipEphemeralCheck);
+
#ifndef DACCESS_COMPILE
//-----------------------------------------------------------------------
@@ -1086,6 +1089,31 @@ void JIT_TailCall()
#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
void InitJITHelpers1()
{
+#ifdef FEATURE_PAL // TODO
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(g_SystemInfo.dwNumberOfProcessors != 0);
+
+ // Allocation helpers, faster but non-logging
+ if (!((TrackAllocationsEnabled()) ||
+ (LoggingOn(LF_GCALLOC, LL_INFO10))
+#ifdef _DEBUG
+ || (g_pConfig->ShouldInjectFault(INJECTFAULT_GCHEAP) != 0)
+#endif // _DEBUG
+ ))
+ {
+ if (GCHeapUtilities::UseThreadAllocationContexts())
+ {
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_NewS_MP_FastPortable);
+ SetJitHelperFunction(CORINFO_HELP_NEWSFAST_ALIGN8, JIT_NewS_MP_FastPortable);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1VC_MP_FastPortable);
+ SetJitHelperFunction(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1OBJ_MP_FastPortable);
+
+ ECall::DynamicallyAssignFCallImpl(GetEEFuncEntryPoint(AllocateString_MP_FastPortable), ECall::FastAllocateString);
+ }
+ }
+#endif
+
if(IsSingleAppDomain())
{
SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE, JIT_GetSharedGCStaticBase_SingleAppDomain);
@@ -1093,7 +1121,15 @@ void InitJITHelpers1()
SetJitHelperFunction(CORINFO_HELP_GETSHARED_GCSTATIC_BASE_NOCTOR, JIT_GetSharedGCStaticBaseNoCtor_SingleAppDomain);
SetJitHelperFunction(CORINFO_HELP_GETSHARED_NONGCSTATIC_BASE_NOCTOR,JIT_GetSharedNonGCStaticBaseNoCtor_SingleAppDomain);
}
+
+ JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
}
+#ifndef FEATURE_PAL // TODO-ARM64-WINDOWS #13592
+EXTERN_C void JIT_UpdateWriteBarrierState(bool) {}
+#endif
+
+#else
+EXTERN_C void JIT_UpdateWriteBarrierState(bool) {}
#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
EXTERN_C void __stdcall ProfileEnterNaked(UINT_PTR clientData)
@@ -1244,6 +1280,11 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
FlushInstructionCache(GetCurrentProcess(),&m_code,sizeof(m_code));
}
+void UMEntryThunkCode::Poison()
+{
+ // Insert 'brk 0xbe' at the entry point
+ m_code[0] = 0xd42017c0;
+}
#ifdef PROFILING_SUPPORTED
#include "proftoeeinterfaceimpl.h"
@@ -1307,28 +1348,29 @@ LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
return EXCEPTION_CONTINUE_SEARCH;
}
+#ifndef CROSSGEN_COMPILE
void StompWriteBarrierEphemeral(bool isRuntimeSuspended)
{
- return;
+ JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
}
void StompWriteBarrierResize(bool isRuntimeSuspended, bool bReqUpperBoundsCheck)
{
- return;
+ JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
}
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
void SwitchToWriteWatchBarrier(bool isRuntimeSuspended)
{
- return;
+ JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
}
void SwitchToNonWriteWatchBarrier(bool isRuntimeSuspended)
{
- return;
+ JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
}
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
-
+#endif // CROSSGEN_COMPILE
#ifdef DACCESS_COMPILE
BOOL GetAnyThunkTarget (T_CONTEXT *pctx, TADDR *pTarget, TADDR *pTargetMethodDesc)
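
All four barrier maintenance callbacks above now funnel into the single JIT_UpdateWriteBarrierState helper instead of being no-ops. A condensed sketch of the shared pattern (the wrapper is illustrative; the reading of the parameter follows the skipEphemeralCheck declaration earlier in this file):

// Every resize/ephemeral/write-watch notification recomputes the barrier
// state; IsServerHeap() feeds the helper's skipEphemeralCheck parameter,
// presumably because server GC has no single contiguous ephemeral range to test.
static void RefreshWriteBarrier()
{
    JIT_UpdateWriteBarrierState(GCHeapUtilities::IsServerHeap());
}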
diff --git a/src/vm/arm64/virtualcallstubcpu.hpp b/src/vm/arm64/virtualcallstubcpu.hpp
index b7c52091de..68c9125b00 100644
--- a/src/vm/arm64/virtualcallstubcpu.hpp
+++ b/src/vm/arm64/virtualcallstubcpu.hpp
@@ -217,8 +217,8 @@ struct ResolveHolder
//w13- this._hashedToken
//ldr w13, [x10 + DATA_OFFSET(_hashedToken)]
offset = DATA_OFFSET(_hashedToken);
- _ASSERTE(offset >=0 && offset%8 == 0);
- _stub._resolveEntryPoint[n++] = 0xB940014D | offset<<7;
+ _ASSERTE(offset >=0 && offset%4 == 0);
+ _stub._resolveEntryPoint[n++] = 0xB940014D | offset<<8;
//eor x9,x9,x13
_stub._resolveEntryPoint[n++] = 0xCA0D0129;
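
The corrected assertion and shift follow from the A64 encoding of `ldr w13, [x10, #offset]` (LDR immediate, unsigned offset, 32-bit): the 12-bit immediate sits at bits [21:10] and is scaled by 4, so a 4-byte-aligned byte offset contributes (offset/4) << 10, i.e. offset << 8 — hence offset%4 rather than offset%8, and << 8 rather than << 7. A sketch of the arithmetic (the helper is illustrative):

#include <cstdint>
#include <cassert>

// Encode "ldr w13, [x10, #offset]" given a byte offset that must be 4-byte aligned.
// Base opcode 0xB940014D already fixes Rt=w13, Rn=x10; imm12 = offset/4 at bit 10.
static uint32_t EncodeLdrW13X10(uint32_t offset)
{
    assert(offset % 4 == 0 && offset / 4 < (1u << 12));
    return 0xB940014Du | ((offset / 4) << 10);
}
// (offset / 4) << 10 equals offset << 8 for 4-byte-aligned offsets,
// which is the form used in the stub emitter above.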
diff --git a/src/vm/armsinglestepper.h b/src/vm/armsinglestepper.h
index 53a10195cc..88935256c2 100644
--- a/src/vm/armsinglestepper.h
+++ b/src/vm/armsinglestepper.h
@@ -88,7 +88,11 @@ private:
kMaxCodeBuffer = 2 + 3 + 1, // WORD slots in our redirect buffer (2 for current instruction, 3 for
// breakpoint instructions used to pad out slots in an IT block and one
// for the final breakpoint)
+#ifdef __linux__
+ kBreakpointOp = 0xde01, // Opcode for the breakpoint instruction used on ARM Linux
+#else
kBreakpointOp = 0xdefe, // Opcode for the breakpoint instruction used on CoreARM
+#endif
};
// Bit numbers of the condition flags in the CPSR.
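
Both constants are Thumb UDF encodings (encoding T1: 0xde00 | imm8). 0xde01 is the Thumb breakpoint the Linux ARM kernel recognizes (reported as a trap rather than a plain undefined-instruction fault), which is presumably why it is selected under __linux__. A sketch of the encoding (the helper is illustrative):

#include <cstdint>

// Thumb UDF, encoding T1: 0b11011110:imm8, i.e. 0xde00 | imm8.
static constexpr uint16_t ThumbUdf(uint8_t imm8)
{
    return static_cast<uint16_t>(0xde00 | imm8);
}

static_assert(ThumbUdf(0x01) == 0xde01, "Linux breakpoint used above");
static_assert(ThumbUdf(0xfe) == 0xdefe, "non-Linux breakpoint used above");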
diff --git a/src/vm/array.cpp b/src/vm/array.cpp
index d6792942e7..3a33aff43a 100644
--- a/src/vm/array.cpp
+++ b/src/vm/array.cpp
@@ -310,7 +310,7 @@ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementTy
DWORD numNonVirtualSlots = numCtors + 3; // 3 for the proper rank Get, Set, Address
size_t cbMT = sizeof(MethodTable);
- cbMT += MethodTable::GetNumVtableIndirections(numVirtuals) * sizeof(PTR_PCODE);
+ cbMT += MethodTable::GetNumVtableIndirections(numVirtuals) * sizeof(MethodTable::VTableIndir_t);
// GC info
size_t cbCGCDescData = 0;
@@ -509,8 +509,11 @@ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementTy
#endif // !defined(_WIN64) && (DATA_ALIGNMENT > 4)
pMT->SetBaseSize(baseSize);
// Because of array method table persisting, we need to copy the map
- memcpy(pMTHead + imapOffset, pParentClass->GetInterfaceMap(),
- pParentClass->GetNumInterfaces() * sizeof(InterfaceInfo_t));
+ for (unsigned index = 0; index < pParentClass->GetNumInterfaces(); ++index)
+ {
+ InterfaceInfo_t *pIntInfo = (InterfaceInfo_t *) (pMTHead + imapOffset + index * sizeof(InterfaceInfo_t));
+ pIntInfo->SetMethodTable((pParentClass->GetInterfaceMap() + index)->GetMethodTable());
+ }
pMT->SetInterfaceMap(pParentClass->GetNumInterfaces(), (InterfaceInfo_t *)(pMTHead + imapOffset));
// Copy down flags for these interfaces as well. This is simplified a bit since we know that System.Array
@@ -536,7 +539,7 @@ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementTy
if (canShareVtableChunks)
{
// Share the parent chunk
- it.SetIndirectionSlot(pParentClass->GetVtableIndirections()[it.GetIndex()]);
+ it.SetIndirectionSlot(pParentClass->GetVtableIndirections()[it.GetIndex()].GetValueMaybeNull());
}
else
{
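
The memcpy-to-loop change above matters because InterfaceInfo_t can now hold pointers stored relative to their own address; bit-copying such a field to a new location silently retargets it. A minimal sketch of the hazard (simplified type; assumes a self-relative representation like the runtime's RelativePointer):

#include <cstdint>

// Simplified self-relative pointer: stores (target - &m_delta).
struct RelPtr
{
    intptr_t m_delta;
    void Set(void *target) { m_delta = (intptr_t)target - (intptr_t)&m_delta; }
    void *Get() const      { return (void *)((intptr_t)&m_delta + m_delta); }
};

// memcpy copies m_delta verbatim, so the copy resolves to a *different* target
// (shifted by the distance between source and destination). Copying via
// Set(src.Get()) -- as the loop above does with SetMethodTable/GetMethodTable --
// recomputes the delta for the new address and stays correct.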
diff --git a/src/vm/assembly.cpp b/src/vm/assembly.cpp
index c9a995452c..32a7cd9969 100644
--- a/src/vm/assembly.cpp
+++ b/src/vm/assembly.cpp
@@ -18,7 +18,6 @@
#include "assembly.hpp"
#include "appdomain.hpp"
-#include "security.h"
#include "perfcounters.h"
#include "assemblyname.hpp"
diff --git a/src/vm/assemblyname.cpp b/src/vm/assemblyname.cpp
index bc6034ae63..f0ed60bb6d 100644
--- a/src/vm/assemblyname.cpp
+++ b/src/vm/assemblyname.cpp
@@ -20,7 +20,6 @@
#include <shlwapi.h>
#include "assemblyname.hpp"
-#include "security.h"
#include "field.h"
#include "strongname.h"
#include "eeconfig.h"
@@ -111,7 +110,7 @@ FCIMPL1(Object*, AssemblyNameNative::GetPublicKeyToken, Object* refThisUNSAFE)
{
FCALL_CONTRACT;
- OBJECTREF orOutputArray = NULL;
+ U1ARRAYREF orOutputArray = NULL;
OBJECTREF refThis = (OBJECTREF) refThisUNSAFE;
HELPER_METHOD_FRAME_BEGIN_RET_1(refThis);
@@ -137,7 +136,8 @@ FCIMPL1(Object*, AssemblyNameNative::GetPublicKeyToken, Object* refThisUNSAFE)
}
}
- Security::CopyEncodingToByteArray(pbToken, cb, &orOutputArray);
+ orOutputArray = (U1ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_U1, cb);
+ memcpyNoGCRefs(orOutputArray->m_Array, pbToken, cb);
}
HELPER_METHOD_FRAME_END();
diff --git a/src/vm/assemblynative.cpp b/src/vm/assemblynative.cpp
index e4f148a712..f0cfe9376c 100644
--- a/src/vm/assemblynative.cpp
+++ b/src/vm/assemblynative.cpp
@@ -23,7 +23,6 @@
#include "field.h"
#include "assemblyname.hpp"
#include "eeconfig.h"
-#include "security.h"
#include "strongname.h"
#include "interoputil.h"
#include "frames.h"
@@ -430,6 +429,40 @@ void QCALLTYPE AssemblyNative::GetType(QCall::AssemblyHandle pAssembly, LPCWSTR
return;
}
+void QCALLTYPE AssemblyNative::GetForwardedType(QCall::AssemblyHandle pAssembly, mdToken mdtExternalType, QCall::ObjectHandleOnStack retType)
+{
+ CONTRACTL
+ {
+ QCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ BEGIN_QCALL;
+
+ HRESULT hr;
+ LPCSTR pszNameSpace;
+ LPCSTR pszClassName;
+ mdToken mdImpl;
+
+ Assembly * pAsm = pAssembly->GetAssembly();
+ Module *pManifestModule = pAsm->GetManifestModule();
+ IfFailThrow(pManifestModule->GetMDImport()->GetExportedTypeProps(mdtExternalType, &pszNameSpace, &pszClassName, &mdImpl, NULL, NULL));
+ if (TypeFromToken(mdImpl) == mdtAssemblyRef)
+ {
+ NameHandle typeName(pszNameSpace, pszClassName);
+ typeName.SetTypeToken(pManifestModule, mdtExternalType);
+ TypeHandle typeHnd = pAsm->GetLoader()->LoadTypeHandleThrowIfFailed(&typeName);
+ {
+ GCX_COOP();
+ retType.Set(typeHnd.GetManagedClassObject());
+ }
+ }
+
+ END_QCALL;
+
+ return;
+}
+
FCIMPL1(FC_BOOL_RET, AssemblyNative::IsDynamic, AssemblyBaseObject* pAssemblyUNSAFE)
{
FCALL_CONTRACT;
diff --git a/src/vm/assemblynative.hpp b/src/vm/assemblynative.hpp
index 267231bd99..3f7362188d 100644
--- a/src/vm/assemblynative.hpp
+++ b/src/vm/assemblynative.hpp
@@ -90,7 +90,10 @@ public:
static
void QCALLTYPE GetType(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, BOOL bThrowOnError, BOOL bIgnoreCase, QCall::ObjectHandleOnStack retType, QCall::ObjectHandleOnStack keepAlive);
-
+
+ static
+ void QCALLTYPE GetForwardedType(QCall::AssemblyHandle pAssembly, mdToken mdtExternalType, QCall::ObjectHandleOnStack retType);
+
static
INT32 QCALLTYPE GetManifestResourceInfo(QCall::AssemblyHandle pAssembly, LPCWSTR wszName, QCall::ObjectHandleOnStack retAssembly, QCall::StringHandleOnStack retFileName, QCall::StackCrawlMarkHandle stackMark);
diff --git a/src/vm/assemblyspec.cpp b/src/vm/assemblyspec.cpp
index 9ec1d97086..7f2829de99 100644
--- a/src/vm/assemblyspec.cpp
+++ b/src/vm/assemblyspec.cpp
@@ -19,7 +19,6 @@
#include <stdlib.h>
#include "assemblyspec.hpp"
-#include "security.h"
#include "eeconfig.h"
#include "strongname.h"
#include "strongnameholders.h"
@@ -495,18 +494,68 @@ void AssemblySpec::AssemblyNameInit(ASSEMBLYNAMEREF* pAsmName, PEImage* pImageIn
// version
gc.Version = AllocateObject(pVersion);
-
- MethodDescCallSite ctorMethod(METHOD__VERSION__CTOR);
-
- ARG_SLOT VersionArgs[5] =
+ // BaseAssemblySpec and AssemblyName properties store uint16 components for the version. Version and AssemblyVersion
+ // store int32 or uint32. When the former are initialized from the latter, the components are truncated to uint16 size.
+ // When the latter are initialized from the former, they are zero-extended to int32 size. For uint16 components, the max
+ // value is used to indicate an unspecified component. For int32 components, -1 is used. Since we're initializing a
+ // Version from an assembly version, map the uint16 unspecified value to an unspecified Version component by
+ // selecting the constructor overload with the matching number of arguments.
+ int componentCount = 2;
+ if (m_context.usBuildNumber != (USHORT)-1)
{
- ObjToArgSlot(gc.Version),
- (ARG_SLOT) m_context.usMajorVersion,
- (ARG_SLOT) m_context.usMinorVersion,
- (ARG_SLOT) m_context.usBuildNumber,
- (ARG_SLOT) m_context.usRevisionNumber,
- };
- ctorMethod.Call(VersionArgs);
+ ++componentCount;
+ if (m_context.usRevisionNumber != (USHORT)-1)
+ {
+ ++componentCount;
+ }
+ }
+ switch (componentCount)
+ {
+ case 2:
+ {
+ // Call Version(int, int) because Version(int, int, int, int) does not allow passing the unspecified value -1
+ MethodDescCallSite ctorMethod(METHOD__VERSION__CTOR_Ix2);
+ ARG_SLOT VersionArgs[] =
+ {
+ ObjToArgSlot(gc.Version),
+ (ARG_SLOT) m_context.usMajorVersion,
+ (ARG_SLOT) m_context.usMinorVersion
+ };
+ ctorMethod.Call(VersionArgs);
+ break;
+ }
+
+ case 3:
+ {
+ // Call Version(int, int, int) because Version(int, int, int, int) does not allow passing the unspecified value -1
+ MethodDescCallSite ctorMethod(METHOD__VERSION__CTOR_Ix3);
+ ARG_SLOT VersionArgs[] =
+ {
+ ObjToArgSlot(gc.Version),
+ (ARG_SLOT) m_context.usMajorVersion,
+ (ARG_SLOT) m_context.usMinorVersion,
+ (ARG_SLOT) m_context.usBuildNumber
+ };
+ ctorMethod.Call(VersionArgs);
+ break;
+ }
+
+ default:
+ {
+ // Call Version(int, int, int, int)
+ _ASSERTE(componentCount == 4);
+ MethodDescCallSite ctorMethod(METHOD__VERSION__CTOR_Ix4);
+ ARG_SLOT VersionArgs[] =
+ {
+ ObjToArgSlot(gc.Version),
+ (ARG_SLOT) m_context.usMajorVersion,
+ (ARG_SLOT) m_context.usMinorVersion,
+ (ARG_SLOT) m_context.usBuildNumber,
+ (ARG_SLOT) m_context.usRevisionNumber
+ };
+ ctorMethod.Call(VersionArgs);
+ break;
+ }
+ }
}
// cultureinfo
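
A condensed view of the overload selection above, where 0xFFFF ((USHORT)-1) is the uint16 "unspecified" sentinel (the helper mirrors the componentCount logic and is illustrative):

#include <cstdint>

// build == 0xFFFF                       -> Version(major, minor)
// build != 0xFFFF, revision == 0xFFFF   -> Version(major, minor, build)
// both specified                        -> Version(major, minor, build, revision)
static int CountVersionComponents(uint16_t build, uint16_t revision)
{
    int n = 2;                      // major and minor are always present
    if (build != 0xFFFF)
    {
        ++n;                        // build is specified
        if (revision != 0xFFFF)
            ++n;                    // revision only counts if build is present
    }
    return n;
}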
@@ -527,13 +576,13 @@ void AssemblySpec::AssemblyNameInit(ASSEMBLYNAMEREF* pAsmName, PEImage* pImageIn
strCtor.Call(args);
}
-
// public key or token byte array
if (m_pbPublicKeyOrToken)
- Security::CopyEncodingToByteArray((BYTE*) m_pbPublicKeyOrToken,
- m_cbPublicKeyOrToken,
- (OBJECTREF*) &gc.PublicKeyOrToken);
+ {
+ gc.PublicKeyOrToken = (U1ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_U1, m_cbPublicKeyOrToken);
+ memcpyNoGCRefs(gc.PublicKeyOrToken->m_Array, m_pbPublicKeyOrToken, m_cbPublicKeyOrToken);
+ }
// simple name
if(GetName())
diff --git a/src/vm/binder.cpp b/src/vm/binder.cpp
index 9ce1584c31..6a60712e3f 100644
--- a/src/vm/binder.cpp
+++ b/src/vm/binder.cpp
@@ -311,6 +311,58 @@ Signature MscorlibBinder::GetSignatureLocal(LPHARDCODEDMETASIG pHardcodedSig)
#ifndef DACCESS_COMPILE
+bool MscorlibBinder::ConvertType(const BYTE*& pSig, SigBuilder * pSigBuilder)
+{
+ bool bSomethingResolved = false;
+
+ CorElementType type = (CorElementType)*pSig++;
+
+ switch (type)
+ {
+ case ELEMENT_TYPE_GENERICINST:
+ {
+ pSigBuilder->AppendElementType(type);
+ if (ConvertType(pSig, pSigBuilder))
+ bSomethingResolved = true;
+ int arity = *pSig++;
+ pSigBuilder->AppendData(arity);
+ for (int i = 0; i < arity; i++)
+ {
+ if (ConvertType(pSig, pSigBuilder))
+ bSomethingResolved = true;
+ }
+ }
+ break;
+
+ case ELEMENT_TYPE_BYREF:
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_SZARRAY:
+ pSigBuilder->AppendElementType(type);
+ if (ConvertType(pSig, pSigBuilder))
+ bSomethingResolved = true;
+ break;
+
+ case ELEMENT_TYPE_CLASS:
+ case ELEMENT_TYPE_VALUETYPE:
+ {
+ // The binder class id may overflow 1 byte. Use 2 bytes to encode it.
+ BinderClassID id = (BinderClassID)(*pSig + 0x100 * *(pSig + 1));
+ pSig += 2;
+
+ pSigBuilder->AppendElementType(type);
+ pSigBuilder->AppendToken(GetClassLocal(id)->GetCl());
+ bSomethingResolved = true;
+ }
+ break;
+
+ default:
+ pSigBuilder->AppendElementType(type);
+ break;
+ }
+
+ return bSomethingResolved;
+}
+
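
The recursive ConvertType replaces the old flat loop in BuildConvertedSignature (below) so that ELEMENT_TYPE_GENERICINST can be walked; the class-id decode it performs is a two-byte little-endian read. A standalone sketch of just that decode (the helper name is illustrative):

#include <cstdint>

// Hardcoded metasigs encode a binder class id as two little-endian bytes
// following ELEMENT_TYPE_CLASS / ELEMENT_TYPE_VALUETYPE, because ids can
// exceed one byte: id = lo + 0x100 * hi.
static uint16_t ReadBinderClassId(const uint8_t *&pSig)
{
    uint16_t id = (uint16_t)(pSig[0] + 0x100 * pSig[1]);
    pSig += 2;
    return id;
}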
//------------------------------------------------------------------
// Resolve type references in the hardcoded metasig.
// Returns a new signature with type refences resolved.
@@ -327,7 +379,7 @@ void MscorlibBinder::BuildConvertedSignature(const BYTE* pSig, SigBuilder * pSig
unsigned argCount;
unsigned callConv;
- INDEBUG(bool bSomethingResolved = false;)
+ bool bSomethingResolved = false;
// calling convention
callConv = *pSig++;
@@ -346,51 +398,8 @@ void MscorlibBinder::BuildConvertedSignature(const BYTE* pSig, SigBuilder * pSig
// <= because we want to include the return value or the field
for (unsigned i = 0; i <= argCount; i++) {
-
- for (;;) {
- BinderClassID id = CLASS__NIL;
- bool again = false;
-
- CorElementType type = (CorElementType)*pSig++;
-
- switch (type)
- {
- case ELEMENT_TYPE_BYREF:
- case ELEMENT_TYPE_PTR:
- case ELEMENT_TYPE_SZARRAY:
- again = true;
- break;
-
- case ELEMENT_TYPE_CLASS:
- case ELEMENT_TYPE_VALUETYPE:
- // The binder class id may overflow 1 byte. Use 2 bytes to encode it.
- id = (BinderClassID) (*pSig + 0x100 * *(pSig + 1));
- pSig += 2;
- break;
-
- case ELEMENT_TYPE_VOID:
- if (i != 0) {
- if (pSig[-2] != ELEMENT_TYPE_PTR)
- THROW_BAD_FORMAT(BFA_ONLY_VOID_PTR_IN_ARGS, (Module*)NULL); // only pointer to void allowed in arguments
- }
- break;
-
- default:
- break;
- }
-
- pSigBuilder->AppendElementType(type);
-
- if (id != CLASS__NIL)
- {
- pSigBuilder->AppendToken(GetClassLocal(id)->GetCl());
-
- INDEBUG(bSomethingResolved = true;)
- }
-
- if (!again)
- break;
- }
+ if (ConvertType(pSig, pSigBuilder))
+ bSomethingResolved = true;
}
_ASSERTE(bSomethingResolved);
diff --git a/src/vm/binder.h b/src/vm/binder.h
index 8e26bdd255..ad80292ea0 100644
--- a/src/vm/binder.h
+++ b/src/vm/binder.h
@@ -277,6 +277,7 @@ private:
Signature GetSignatureLocal(LPHARDCODEDMETASIG pHardcodedSig);
+ bool ConvertType(const BYTE*& pSig, SigBuilder * pSigBuilder);
void BuildConvertedSignature(const BYTE* pSig, SigBuilder * pSigBuilder);
const BYTE* ConvertSignature(LPHARDCODEDMETASIG pHardcodedSig, const BYTE* pSig);
diff --git a/src/vm/callcounter.cpp b/src/vm/callcounter.cpp
index 90013c79fb..14d9e6e6a4 100644
--- a/src/vm/callcounter.cpp
+++ b/src/vm/callcounter.cpp
@@ -23,23 +23,6 @@ CallCounter::CallCounter()
m_lock.Init(LOCK_TYPE_DEFAULT);
}
-// Init our connection to the tiered compilation manager during
-// AppDomain startup. This pointer will remain valid for the lifetime
-// of the AppDomain.
-void CallCounter::SetTieredCompilationManager(TieredCompilationManager* pTieredCompilationManager)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_PREEMPTIVE;
- }
- CONTRACTL_END;
-
- m_pTieredCompilationManager.Store(pTieredCompilationManager);
-}
-
// This is called by the prestub each time the method is invoked in a particular
// AppDomain (the AppDomain for which AppDomain.GetCallCounter() == this). These
// calls continue until we backpatch the prestub to avoid future calls. This allows
@@ -92,7 +75,7 @@ BOOL CallCounter::OnMethodCalled(MethodDesc* pMethodDesc)
}
}
- return m_pTieredCompilationManager.Load()->OnMethodCalled(pMethodDesc, callCount);
+ return GetAppDomain()->GetTieredCompilationManager()->OnMethodCalled(pMethodDesc, callCount);
}
#endif // FEATURE_TIERED_COMPILATION
diff --git a/src/vm/callcounter.h b/src/vm/callcounter.h
index 82d14b76d9..ed98ccb1c8 100644
--- a/src/vm/callcounter.h
+++ b/src/vm/callcounter.h
@@ -70,13 +70,10 @@ public:
CallCounter();
#endif
- void SetTieredCompilationManager(TieredCompilationManager* pTieredCompilationManager);
BOOL OnMethodCalled(MethodDesc* pMethodDesc);
private:
- VolatilePtr<TieredCompilationManager> m_pTieredCompilationManager;
-
// fields protected by lock
SpinLock m_lock;
CallCounterHash m_methodToCallCount;
diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
index 68af979b2f..c64d9e9042 100644
--- a/src/vm/ceeload.cpp
+++ b/src/vm/ceeload.cpp
@@ -20,7 +20,6 @@
#include "reflectclasswriter.h"
#include "method.hpp"
#include "stublink.h"
-#include "security.h"
#include "cgensys.h"
#include "excep.h"
#include "dbginterface.h"
@@ -99,85 +98,6 @@
#define NGEN_STATICS_ALLCLASSES_WERE_LOADED -1
-
-//---------------------------------------------------------------------------------------
-InstrumentedILOffsetMapping::InstrumentedILOffsetMapping()
-{
- LIMITED_METHOD_DAC_CONTRACT;
-
- m_cMap = 0;
- m_rgMap = NULL;
- _ASSERTE(IsNull());
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Check whether there is any mapping information stored in this object.
-//
-// Notes:
-// The memory should be alive throughout the process lifetime until
-// the Module containing the instrumented method is destructed.
-//
-
-BOOL InstrumentedILOffsetMapping::IsNull()
-{
- LIMITED_METHOD_DAC_CONTRACT;
-
- _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
- return (m_cMap == 0);
-}
-
-#if !defined(DACCESS_COMPILE)
-//---------------------------------------------------------------------------------------
-//
-// Release the memory used by the array of COR_IL_MAPs.
-//
-// Notes:
-// * The memory should be alive throughout the process lifetime until the Module containing
-// the instrumented method is destructed.
-// * This struct should be read-only in DAC builds.
-//
-
-void InstrumentedILOffsetMapping::Clear()
-{
- LIMITED_METHOD_CONTRACT;
-
- if (m_rgMap != NULL)
- {
- delete [] m_rgMap;
- }
-
- m_cMap = 0;
- m_rgMap = NULL;
-}
-#endif // !DACCESS_COMPILE
-
-#if !defined(DACCESS_COMPILE)
-void InstrumentedILOffsetMapping::SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap)
-{
- WRAPPER_NO_CONTRACT;
- _ASSERTE((cMap == 0) == (rgMap == NULL));
- m_cMap = cMap;
- m_rgMap = ARRAY_PTR_COR_IL_MAP(rgMap);
-}
-#endif // !DACCESS_COMPILE
-
-SIZE_T InstrumentedILOffsetMapping::GetCount() const
-{
- LIMITED_METHOD_DAC_CONTRACT;
-
- _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
- return m_cMap;
-}
-
-ARRAY_PTR_COR_IL_MAP InstrumentedILOffsetMapping::GetOffsets() const
-{
- LIMITED_METHOD_DAC_CONTRACT;
-
- _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
- return m_rgMap;
-}
-
BOOL Module::HasInlineTrackingMap()
{
LIMITED_METHOD_DAC_CONTRACT;
@@ -2883,7 +2803,7 @@ BOOL Module::IsPreV4Assembly()
}
-ArrayDPTR(FixupPointer<PTR_MethodTable>) ModuleCtorInfo::GetGCStaticMTs(DWORD index)
+ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ModuleCtorInfo::GetGCStaticMTs(DWORD index)
{
LIMITED_METHOD_CONTRACT;
@@ -8007,6 +7927,55 @@ void Module::ExpandAll(DataImage *image)
_ASSERTE(pBlobEntry->type == EndOfBlobStream);
}
+ //
+ // Record references to all of the hot methods specified by the MethodProfilingData array.
+ // We call MethodReferencedByCompiledCode to indicate that we plan on compiling this method
+ //
+ CORBBTPROF_TOKEN_INFO * pMethodProfilingData = profileData->GetTokenFlagsData(MethodProfilingData);
+ DWORD cMethodProfilingData = profileData->GetTokenFlagsCount(MethodProfilingData);
+ for (unsigned int i = 0; (i < cMethodProfilingData); i++)
+ {
+ mdToken token = pMethodProfilingData[i].token;
+ DWORD profilingFlags = pMethodProfilingData[i].flags;
+
+ // We call MethodReferencedByCompiledCode only when the profile data indicates that
+ // we executed (i.e., read) the code for the method
+ //
+ if (profilingFlags & (1 << ReadMethodCode))
+ {
+ if (TypeFromToken(token) == mdtMethodDef)
+ {
+ MethodDesc * pMD = LookupMethodDef(token);
+ //
+ // Record a reference to a hot non-generic method
+ //
+ image->GetPreloader()->MethodReferencedByCompiledCode((CORINFO_METHOD_HANDLE)pMD);
+ }
+ else if (TypeFromToken(token) == ibcMethodSpec)
+ {
+ CORBBTPROF_BLOB_PARAM_SIG_ENTRY *pBlobSigEntry = profileData->GetBlobSigEntry(token);
+
+ if (pBlobSigEntry != NULL)
+ {
+ _ASSERTE(pBlobSigEntry->blob.token == token);
+ MethodDesc * pMD = LoadIBCMethodHelper(image, pBlobSigEntry);
+
+ if (pMD != NULL)
+ {
+ // Occasionally a non-instantiated generic method shows up in the IBC data; we should NOT compile it.
+ if (!pMD->IsTypicalMethodDefinition())
+ {
+ //
+ // Record a reference to a hot instantiated generic method
+ //
+ image->GetPreloader()->MethodReferencedByCompiledCode((CORINFO_METHOD_HANDLE)pMD);
+ }
+ }
+ }
+ }
+ }
+ }
+
{
//
// Fill out MemberRef RID map and va sig cookies for
@@ -8131,6 +8100,8 @@ void Module::SaveTypeHandle(DataImage * image,
#endif // _DEBUG
}
+#ifndef DACCESS_COMPILE
+
void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
{
STANDARD_VM_CONTRACT;
@@ -8152,7 +8123,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
// items numElementsHot...i-1 are cold
for (i = 0; i < numElements; i++)
{
- MethodTable *ppMTTemp = ppMT[i];
+ MethodTable *ppMTTemp = ppMT[i].GetValue();
// Count the number of boxed statics along the way
totalBoxedStatics += ppMTTemp->GetNumBoxedRegularStatics();
@@ -8166,8 +8137,8 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
if (hot)
{
// swap ppMT[i] and ppMT[numElementsHot] to maintain the loop invariant
- ppMT[i] = ppMT[numElementsHot];
- ppMT[numElementsHot] = ppMTTemp;
+ ppMT[i].SetValue(ppMT[numElementsHot].GetValue());
+ ppMT[numElementsHot].SetValue(ppMTTemp);
numElementsHot++;
}
@@ -8192,11 +8163,11 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
for (i = 0; i < numElementsHot; i++)
{
- hashArray[i] = GenerateHash(ppMT[i], HOT);
+ hashArray[i] = GenerateHash(ppMT[i].GetValue(), HOT);
}
for (i = numElementsHot; i < numElements; i++)
{
- hashArray[i] = GenerateHash(ppMT[i], COLD);
+ hashArray[i] = GenerateHash(ppMT[i].GetValue(), COLD);
}
// Sort the two arrays by hash values to create regions with the same hash values.
@@ -8259,7 +8230,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
// make cctorInfoCold point to the first cold element
cctorInfoCold = cctorInfoHot + numElementsHot;
- ppHotGCStaticsMTs = (totalBoxedStatics != 0) ? new FixupPointer<PTR_MethodTable>[totalBoxedStatics] : NULL;
+ ppHotGCStaticsMTs = (totalBoxedStatics != 0) ? new RelativeFixupPointer<PTR_MethodTable>[totalBoxedStatics] : NULL;
numHotGCStaticsMTs = totalBoxedStatics;
DWORD iGCStaticMT = 0;
@@ -8275,7 +8246,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
ppColdGCStaticsMTs = ppHotGCStaticsMTs + numHotGCStaticsMTs;
}
- MethodTable* pMT = ppMT[i];
+ MethodTable* pMT = ppMT[i].GetValue();
ClassCtorInfoEntry* pEntry = &cctorInfoHot[i];
WORD numBoxedStatics = pMT->GetNumBoxedRegularStatics();
@@ -8305,7 +8276,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
== (iGCStaticMT - pEntry->firstBoxedStaticMTIndex) * sizeof(MethodTable*));
TypeHandle th = pField->GetFieldTypeHandleThrowing();
- ppHotGCStaticsMTs[iGCStaticMT++].SetValue(th.GetMethodTable());
+ ppHotGCStaticsMTs[iGCStaticMT++].SetValueMaybeNull(th.GetMethodTable());
numFoundBoxedStatics++;
}
@@ -8328,7 +8299,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
if (numElements > 0)
image->StoreStructure(ppMT,
- sizeof(MethodTable *) * numElements,
+ sizeof(RelativePointer<MethodTable *>) * numElements,
DataImage::ITEM_MODULE_CCTOR_INFO_HOT);
if (numElements > numElementsHot)
@@ -8345,7 +8316,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
if ( numHotGCStaticsMTs )
{
// Save the mt templates
- image->StoreStructure( ppHotGCStaticsMTs, numHotGCStaticsMTs * sizeof(MethodTable*),
+ image->StoreStructure( ppHotGCStaticsMTs, numHotGCStaticsMTs * sizeof(RelativeFixupPointer<MethodTable*>),
DataImage::ITEM_GC_STATIC_HANDLES_HOT);
}
else
@@ -8356,7 +8327,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
if ( numColdGCStaticsMTs )
{
// Save the hot mt templates
- image->StoreStructure( ppColdGCStaticsMTs, numColdGCStaticsMTs * sizeof(MethodTable*),
+ image->StoreStructure( ppColdGCStaticsMTs, numColdGCStaticsMTs * sizeof(RelativeFixupPointer<MethodTable*>),
DataImage::ITEM_GC_STATIC_HANDLES_COLD);
}
else
@@ -8365,6 +8336,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
}
}
+#endif // !DACCESS_COMPILE
bool Module::AreAllClassesFullyLoaded()
{
@@ -9165,13 +9137,20 @@ void Module::PlaceType(DataImage *image, TypeHandle th, DWORD profilingFlags)
{
if (pMT->HasPerInstInfo())
{
- Dictionary ** pPerInstInfo = pMT->GetPerInstInfo();
+ DPTR(MethodTable::PerInstInfoElem_t) pPerInstInfo = pMT->GetPerInstInfo();
BOOL fIsEagerBound = pMT->CanEagerBindToParentDictionaries(image, NULL);
if (fIsEagerBound)
{
- image->PlaceInternedStructureForAddress(pPerInstInfo, CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
+ if (MethodTable::PerInstInfoElem_t::isRelative)
+ {
+ image->PlaceStructureForAddress(pPerInstInfo, CORCOMPILE_SECTION_READONLY_HOT);
+ }
+ else
+ {
+ image->PlaceInternedStructureForAddress(pPerInstInfo, CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
+ }
}
else
{
@@ -9501,7 +9480,7 @@ void ModuleCtorInfo::Fixup(DataImage *image)
for (DWORD i=0; i<numElements; i++)
{
- image->FixupPointerField(ppMT, i * sizeof(ppMT[0]));
+ image->FixupRelativePointerField(ppMT, i * sizeof(ppMT[0]));
}
}
else
@@ -10123,11 +10102,37 @@ void Module::RestoreMethodTablePointer(RelativeFixupPointer<PTR_MethodTable> * p
if (ppMT->IsTagged((TADDR)ppMT))
{
- RestoreMethodTablePointerRaw(ppMT->GetValuePtr((TADDR)ppMT), pContainingModule, level);
+ RestoreMethodTablePointerRaw(ppMT->GetValuePtr(), pContainingModule, level);
+ }
+ else
+ {
+ ClassLoader::EnsureLoaded(ppMT->GetValue(), level);
+ }
+}
+
+/*static*/
+void Module::RestoreMethodTablePointer(PlainPointer<PTR_MethodTable> * ppMT,
+ Module *pContainingModule,
+ ClassLoadLevel level)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (ppMT->IsNull())
+ return;
+
+ if (ppMT->IsTagged())
+ {
+ RestoreMethodTablePointerRaw(ppMT->GetValuePtr(), pContainingModule, level);
}
else
{
- ClassLoader::EnsureLoaded(ppMT->GetValue((TADDR)ppMT), level);
+ ClassLoader::EnsureLoaded(ppMT->GetValue(), level);
}
}
@@ -10262,7 +10267,7 @@ PTR_Module Module::RestoreModulePointerIfLoaded(DPTR(RelativeFixupPointer<PTR_Mo
return ppModule->GetValue(dac_cast<TADDR>(ppModule));
#ifndef DACCESS_COMPILE
- PTR_Module * ppValue = ppModule->GetValuePtr(dac_cast<TADDR>(ppModule));
+ PTR_Module * ppValue = ppModule->GetValuePtr();
// Ensure that the compiler won't fetch the value twice
TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppValue);
@@ -10315,7 +10320,7 @@ void Module::RestoreModulePointer(RelativeFixupPointer<PTR_Module> * ppModule, M
if (!ppModule->IsTagged((TADDR)ppModule))
return;
- PTR_Module * ppValue = ppModule->GetValuePtr((TADDR)ppModule);
+ PTR_Module * ppValue = ppModule->GetValuePtr();
// Ensure that the compiler won't fetch the value twice
TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppValue);
@@ -10469,7 +10474,7 @@ void Module::RestoreTypeHandlePointer(RelativeFixupPointer<TypeHandle> * pHandle
if (pHandle->IsTagged((TADDR)pHandle))
{
- RestoreTypeHandlePointerRaw(pHandle->GetValuePtr((TADDR)pHandle), pContainingModule, level);
+ RestoreTypeHandlePointerRaw(pHandle->GetValuePtr(), pContainingModule, level);
}
else
{
@@ -10571,7 +10576,7 @@ void Module::RestoreMethodDescPointer(RelativeFixupPointer<PTR_MethodDesc> * ppM
if (ppMD->IsTagged((TADDR)ppMD))
{
- RestoreMethodDescPointerRaw(ppMD->GetValuePtr((TADDR)ppMD), pContainingModule, level);
+ RestoreMethodDescPointerRaw(ppMD->GetValuePtr(), pContainingModule, level);
}
else
{
@@ -13896,7 +13901,7 @@ ModuleCtorInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
// This class is contained so do not enumerate 'this'.
DacEnumMemoryRegion(dac_cast<TADDR>(ppMT), numElements *
- sizeof(TADDR));
+ sizeof(RelativePointer<MethodTable *>));
DacEnumMemoryRegion(dac_cast<TADDR>(cctorInfoHot), numElementsHot *
sizeof(ClassCtorInfoEntry));
DacEnumMemoryRegion(dac_cast<TADDR>(cctorInfoCold),
@@ -14113,97 +14118,6 @@ LPCWSTR Module::GetPathForErrorMessages()
}
}
-#ifndef DACCESS_COMPILE
-BOOL IsVerifiableWrapper(MethodDesc* pMD)
-{
- BOOL ret = FALSE;
- //EX_TRY contains _alloca, so I can't use this inside of a loop. 4wesome.
- EX_TRY
- {
- ret = pMD->IsVerifiable();
- }
- EX_CATCH
- {
- //if the method has a security exception, it will fly through IsVerifiable. Shunt
- //to the unverifiable path below.
- }
- EX_END_CATCH(RethrowTerminalExceptions)
- return ret;
-}
-#endif //DACCESS_COMPILE
-void Module::VerifyAllMethods()
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- }
- CONTRACTL_END;
-#ifndef DACCESS_COMPILE
- //If the EE isn't started yet, it's not safe to jit. We fail in COM jitting a p/invoke.
- if (!g_fEEStarted)
- return;
-
- struct Local
- {
- static bool VerifyMethodsForTypeDef(Module * pModule, mdTypeDef td)
- {
- bool ret = true;
- TypeHandle th = ClassLoader::LoadTypeDefThrowing(pModule, td, ClassLoader::ThrowIfNotFound,
- ClassLoader::PermitUninstDefOrRef);
-
- MethodTable * pMT = th.GetMethodTable();
- MethodTable::MethodIterator it(pMT);
- for (; it.IsValid(); it.Next())
- {
- MethodDesc * pMD = it.GetMethodDesc();
- if (pMD->HasILHeader() && Security::IsMethodTransparent(pMD)
- && (g_pObjectCtorMD != pMD))
- {
- if (!IsVerifiableWrapper(pMD))
- {
-#ifdef _DEBUG
- SString s;
- if (LoggingOn(LF_VERIFIER, LL_ERROR))
- TypeString::AppendMethodDebug(s, pMD);
- LOG((LF_VERIFIER, LL_ERROR, "Transparent Method (0x%p), %S is unverifiable\n",
- pMD, s.GetUnicode()));
-#endif
- ret = false;
- }
- }
- }
- return ret;
- }
- };
- //Verify all methods in a module eagerly, forcing them to get loaded.
-
- IMDInternalImport * pMDI = GetMDImport();
- HENUMTypeDefInternalHolder hEnum(pMDI);
- mdTypeDef td;
- hEnum.EnumTypeDefInit();
-
- bool isAllVerifiable = true;
- //verify global methods
- if (GetGlobalMethodTable())
- {
- //verify everything in the MT.
- if (!Local::VerifyMethodsForTypeDef(this, COR_GLOBAL_PARENT_TOKEN))
- isAllVerifiable = false;
- }
- while (pMDI->EnumTypeDefNext(&hEnum, &td))
- {
- //verify everything
- if (!Local::VerifyMethodsForTypeDef(this, td))
- isAllVerifiable = false;
- }
- if (!isAllVerifiable)
- EEFileLoadException::Throw(GetFile(), COR_E_VERIFICATION);
-#endif //DACCESS_COMPILE
-}
-
-
#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && !defined(CROSS_COMPILE)
void Module::ExpandAll()
{
@@ -14235,16 +14149,7 @@ void Module::ExpandAll()
|| pMD->HasClassInstantiation())
&& (pMD->MayHaveNativeCode() && !pMD->IsFCallOrIntrinsic()))
{
- COR_ILMETHOD * ilHeader = pMD->GetILHeader();
- COR_ILMETHOD_DECODER::DecoderStatus ignored;
- NewHolder<COR_ILMETHOD_DECODER> pHeader(new COR_ILMETHOD_DECODER(ilHeader,
- pMD->GetMDImport(),
- &ignored));
-#ifdef FEATURE_INTERPRETER
- pMD->MakeJitWorker(pHeader, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE));
-#else
- pMD->MakeJitWorker(pHeader, CORJIT_FLAGS());
-#endif
+ pMD->PrepareInitialCode();
}
}
static void CompileMethodsForMethodTable(MethodTable * pMT)
@@ -14314,9 +14219,6 @@ void Module::ExpandAll()
};
//Jit all methods eagerly
- /* XXX Thu 4/26/2007
- * This code is lifted mostly from code:Module::VerifyAllMethods
- */
IMDInternalImport * pMDI = GetMDImport();
HENUMTypeDefInternalHolder hEnum(pMDI);
mdTypeDef td;
diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
index bc9937a828..b70ea51feb 100644
--- a/src/vm/ceeload.h
+++ b/src/vm/ceeload.h
@@ -47,6 +47,8 @@
#include "readytoruninfo.h"
#endif
+#include "ilinstrumentation.h"
+
class PELoader;
class Stub;
class MethodDesc;
@@ -77,7 +79,9 @@ class CerNgenRootTable;
struct MethodContextElement;
class TypeHandleList;
class ProfileEmitter;
-class ReJitManager;
+class CodeVersionManager;
+class CallCounter;
+class TieredCompilationManager;
class TrackingMap;
struct MethodInModule;
class PersistentInlineTrackingMapNGen;
@@ -618,7 +622,7 @@ struct ModuleCtorInfo
DWORD numElements;
DWORD numLastAllocated;
DWORD numElementsHot;
- DPTR(PTR_MethodTable) ppMT; // size is numElements
+ DPTR(RelativePointer<PTR_MethodTable>) ppMT; // size is numElements
PTR_ClassCtorInfoEntry cctorInfoHot; // size is numElementsHot
PTR_ClassCtorInfoEntry cctorInfoCold; // size is numElements-numElementsHot
@@ -627,8 +631,8 @@ struct ModuleCtorInfo
DWORD numHotHashes;
DWORD numColdHashes;
- ArrayDPTR(FixupPointer<PTR_MethodTable>) ppHotGCStaticsMTs; // hot table
- ArrayDPTR(FixupPointer<PTR_MethodTable>) ppColdGCStaticsMTs; // cold table
+ ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ppHotGCStaticsMTs; // hot table
+ ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ppColdGCStaticsMTs; // cold table
DWORD numHotGCStaticsMTs;
DWORD numColdGCStaticsMTs;
@@ -664,7 +668,13 @@ struct ModuleCtorInfo
return hashVal;
};
- ArrayDPTR(FixupPointer<PTR_MethodTable>) GetGCStaticMTs(DWORD index);
+ ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) GetGCStaticMTs(DWORD index);
+
+ PTR_MethodTable GetMT(DWORD i)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return ppMT[i].GetValue(dac_cast<TADDR>(ppMT) + i * sizeof(RelativePointer<PTR_MethodTable>));
+ }
#ifdef FEATURE_PREJIT
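
GetMT passes an explicitly computed address to GetValue because a relative pointer resolves against the location where it is stored, and element i of ppMT lives at dac_cast<TADDR>(ppMT) + i * sizeof(RelativePointer<PTR_MethodTable>). A simplified sketch of the resolution (assumes a delta-from-field representation):

#include <cstdint>
typedef uintptr_t TADDR;

// Simplified RelativePointer::GetValue: the stored delta is relative to the
// address where the pointer itself lives, which the caller must supply when
// reading through a DAC-style cast rather than through 'this'.
static TADDR GetValueAt(TADDR fieldAddr, intptr_t storedDelta)
{
    return fieldAddr + storedDelta;
}
// For element i of ppMT, fieldAddr = base + i * sizeof(RelativePointer<PTR_MethodTable>),
// matching the expression in GetMT above.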
@@ -675,11 +685,11 @@ struct ModuleCtorInfo
class ClassCtorInfoEntryArraySort : public CQuickSort<DWORD>
{
private:
- PTR_MethodTable *m_pBase1;
+ DPTR(RelativePointer<PTR_MethodTable>) m_pBase1;
public:
//Constructor
- ClassCtorInfoEntryArraySort(DWORD *base, PTR_MethodTable *base1, int count)
+ ClassCtorInfoEntryArraySort(DWORD *base, DPTR(RelativePointer<PTR_MethodTable>) base1, int count)
: CQuickSort<DWORD>(base, count)
{
WRAPPER_NO_CONTRACT;
@@ -700,6 +710,7 @@ struct ModuleCtorInfo
return 1;
}
+#ifndef DACCESS_COMPILE
// Swap is overwritten so that we can sort both the MethodTable pointer
// array and the ClassCtorInfoEntry array in parallel.
FORCEINLINE void Swap(SSIZE_T iFirst, SSIZE_T iSecond)
@@ -715,10 +726,11 @@ struct ModuleCtorInfo
m_pBase[iFirst] = m_pBase[iSecond];
m_pBase[iSecond] = sTemp;
- sTemp1 = m_pBase1[iFirst];
- m_pBase1[iFirst] = m_pBase1[iSecond];
- m_pBase1[iSecond] = sTemp1;
+ sTemp1 = m_pBase1[iFirst].GetValueMaybeNull();
+ m_pBase1[iFirst].SetValueMaybeNull(m_pBase1[iSecond].GetValueMaybeNull());
+ m_pBase1[iSecond].SetValueMaybeNull(sTemp1);
}
+#endif // !DACCESS_COMPILE
};
#endif // FEATURE_PREJIT
};
@@ -1085,104 +1097,6 @@ typedef SHash<DynamicILBlobTraits> DynamicILBlobTable;
typedef DPTR(DynamicILBlobTable) PTR_DynamicILBlobTable;
-// declare an array type of COR_IL_MAP entries
-typedef ArrayDPTR(COR_IL_MAP) ARRAY_PTR_COR_IL_MAP;
-
-//---------------------------------------------------------------------------------------
-//
-// A profiler may instrument a method by changing the IL. This is typically done when the profiler receives
-// a JITCompilationStarted notification. The profiler also has the option to provide the runtime with
-// a mapping between original IL offsets and instrumented IL offsets. This struct is a simple container
-// for storing the mapping information. We store the mapping information on the Module class, where it can
-// be accessed by the debugger from out-of-process.
-//
-
-class InstrumentedILOffsetMapping
-{
-public:
- InstrumentedILOffsetMapping();
-
- // Check whether there is any mapping information stored in this object.
- BOOL IsNull();
-
-#if !defined(DACCESS_COMPILE)
- // Release the memory used by the array of COR_IL_MAPs.
- void Clear();
-
- void SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap);
-#endif // !DACCESS_COMPILE
-
- SIZE_T GetCount() const;
- ARRAY_PTR_COR_IL_MAP GetOffsets() const;
-
-private:
- SIZE_T m_cMap; // the number of elements in m_rgMap
- ARRAY_PTR_COR_IL_MAP m_rgMap; // an array of COR_IL_MAPs
-};
-
-//---------------------------------------------------------------------------------------
-//
-// Hash table entry for storing InstrumentedILOffsetMapping. This is keyed by the MethodDef token.
-//
-
-struct ILOffsetMappingEntry
-{
- ILOffsetMappingEntry()
- {
- LIMITED_METHOD_DAC_CONTRACT;
-
- m_methodToken = mdMethodDefNil;
- // No need to initialize m_mapping. The default ctor of InstrumentedILOffsetMapping does the job.
- }
-
- ILOffsetMappingEntry(mdMethodDef token, InstrumentedILOffsetMapping mapping)
- {
- LIMITED_METHOD_DAC_CONTRACT;
-
- m_methodToken = token;
- m_mapping = mapping;
- }
-
- mdMethodDef m_methodToken;
- InstrumentedILOffsetMapping m_mapping;
-};
-
-//---------------------------------------------------------------------------------------
-//
-// This class is used to create the hash table for the instrumented IL offset mapping.
-// It encapsulates the desired behaviour of the templated hash table and implements
-// the various functions needed by the hash table.
-//
-
-class ILOffsetMappingTraits : public NoRemoveSHashTraits<DefaultSHashTraits<ILOffsetMappingEntry> >
-{
-public:
- typedef mdMethodDef key_t;
-
- static key_t GetKey(element_t e)
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return e.m_methodToken;
- }
- static BOOL Equals(key_t k1, key_t k2)
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return (k1 == k2);
- }
- static count_t Hash(key_t k)
- {
- LIMITED_METHOD_DAC_CONTRACT;
- return (count_t)(size_t)k;
- }
- static const element_t Null()
- {
- LIMITED_METHOD_DAC_CONTRACT;
- ILOffsetMappingEntry e;
- return e;
- }
- static bool IsNull(const element_t &e) { LIMITED_METHOD_DAC_CONTRACT; return e.m_methodToken == mdMethodDefNil; }
-};
-
// ESymbolFormat specified the format used by a symbol stream
typedef enum
{
@@ -1192,11 +1106,6 @@ typedef enum
}ESymbolFormat;
-// Hash table of profiler-provided instrumented IL offset mapping, keyed by the MethodDef token
-typedef SHash<ILOffsetMappingTraits> ILOffsetMappingTable;
-typedef DPTR(ILOffsetMappingTable) PTR_ILOffsetMappingTable;
-
-
#ifdef FEATURE_COMINTEROP
//---------------------------------------------------------------------------------------
@@ -1885,7 +1794,12 @@ protected:
ClassLoader *GetClassLoader();
PTR_BaseDomain GetDomain();
- ReJitManager * GetReJitManager();
+#ifdef FEATURE_CODE_VERSIONING
+ CodeVersionManager * GetCodeVersionManager();
+#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ CallCounter * GetCallCounter();
+#endif
mdFile GetModuleRef()
{
@@ -2902,6 +2816,10 @@ public:
ClassLoadLevel level = CLASS_LOADED);
static void RestoreFieldDescPointer(RelativeFixupPointer<PTR_FieldDesc> * ppFD);
+ static void RestoreMethodTablePointer(PlainPointer<PTR_MethodTable> * ppMT,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+
static void RestoreModulePointer(RelativeFixupPointer<PTR_Module> * ppModule, Module *pContainingModule);
static PTR_Module RestoreModulePointerIfLoaded(DPTR(RelativeFixupPointer<PTR_Module>) ppModule, Module *pContainingModule);
@@ -3353,8 +3271,6 @@ protected:
public:
- void VerifyAllMethods();
-
CrstBase *GetLookupTableCrst()
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/ceeload.inl b/src/vm/ceeload.inl
index 8226dce7d7..3afef732cc 100644
--- a/src/vm/ceeload.inl
+++ b/src/vm/ceeload.inl
@@ -656,10 +656,20 @@ inline MethodTable* Module::GetDynamicClassMT(DWORD dynamicClassID)
return m_pDynamicStaticsInfo[dynamicClassID].pEnclosingMT;
}
-inline ReJitManager * Module::GetReJitManager()
+#ifdef FEATURE_CODE_VERSIONING
+inline CodeVersionManager * Module::GetCodeVersionManager()
{
LIMITED_METHOD_CONTRACT;
- return GetDomain()->GetReJitManager();
+ return GetDomain()->GetCodeVersionManager();
}
+#endif // FEATURE_CODE_VERSIONING
+
+#ifdef FEATURE_TIERED_COMPILATION
+inline CallCounter * Module::GetCallCounter()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetDomain()->GetCallCounter();
+}
+#endif // FEATURE_TIERED_COMPILATION
#endif // CEELOAD_INL_
diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp
index f28785bc21..011caf0997 100644
--- a/src/vm/ceemain.cpp
+++ b/src/vm/ceemain.cpp
@@ -137,7 +137,6 @@
#include "stackwalk.h"
#include "gcheaputilities.h"
#include "interoputil.h"
-#include "security.h"
#include "fieldmarshaler.h"
#include "dbginterface.h"
#include "eedbginterfaceimpl.h"
@@ -900,10 +899,6 @@ void EEStartupHelper(COINITIEE fFlags)
#ifndef CROSSGEN_COMPILE
- // This isn't done as part of InitializeGarbageCollector() above because thread
- // creation requires AppDomains to have been set up.
- FinalizerThread::FinalizerThreadCreate();
-
#ifndef FEATURE_PAL
// Watson initialization must precede InitializeDebugger() and InstallUnhandledExceptionFilter()
// because on CoreCLR when Waston is enabled, debugging service needs to be enabled and UEF will be used.
@@ -1010,6 +1005,10 @@ void EEStartupHelper(COINITIEE fFlags)
hr = g_pGCHeap->Initialize();
IfFailGo(hr);
+ // This isn't done as part of InitializeGarbageCollector() above because thread
+ // creation requires AppDomains to have been set up.
+ FinalizerThread::FinalizerThreadCreate();
+
// Now we really have fully initialized the garbage collector
SetGarbageCollectorFullyInitialized();
@@ -1111,12 +1110,6 @@ void EEStartupHelper(COINITIEE fFlags)
SystemDomain::SystemModule()->ExpandAll();
}
- //For a similar reason, let's not run VerifyAllOnLoad either.
- if (g_pConfig->VerifyModulesOnLoad())
- {
- SystemDomain::SystemModule()->VerifyAllMethods();
- }
-
// Perform mscorlib consistency check if requested
g_Mscorlib.CheckExtended();
@@ -1630,6 +1623,13 @@ void STDMETHODCALLTYPE EEShutDownHelper(BOOL fIsDllUnloading)
// Indicate the EE is the shut down phase.
g_fEEShutDown |= ShutDown_Start;
+#ifdef FEATURE_TIERED_COMPILATION
+ {
+ GCX_PREEMP();
+ TieredCompilationManager::ShutdownAllDomains();
+ }
+#endif
+
fFinalizeOK = TRUE;
// Terminate the BBSweep thread
@@ -2509,6 +2509,7 @@ void LoadGarbageCollector()
#endif // FEATURE_STANDALONE_GC
+#ifndef FEATURE_STANDALONE_GC_ONLY
void LoadStaticGarbageCollector()
{
CONTRACTL{
@@ -2531,6 +2532,7 @@ void LoadStaticGarbageCollector()
g_pGCHandleManager = pGcHandleManager;
g_gcDacGlobals = &g_gc_dac_vars;
}
+#endif // FEATURE_STANDALONE_GC_ONLY
void InitializeGarbageCollector()
@@ -2567,7 +2569,9 @@ void InitializeGarbageCollector()
else
#endif // FEATURE_STANDALONE_GC
{
+#ifndef FEATURE_STANDALONE_GC_ONLY
LoadStaticGarbageCollector();
+#endif // FEATURE_STANDALONE_GC_ONLY
}
// Apparently the Windows linker removes global variables if they are never
diff --git a/src/vm/class.cpp b/src/vm/class.cpp
index 6697b23a9a..e4268b57ec 100644
--- a/src/vm/class.cpp
+++ b/src/vm/class.cpp
@@ -5,12 +5,6 @@
// File: CLASS.CPP
//
-
-//
-
-//
-// ============================================================================
-
#include "common.h"
#include "dllimport.h"
@@ -889,7 +883,15 @@ ClassLoader::LoadExactParentAndInterfacesTransitively(MethodTable *pMT)
LOG((LF_CLASSLOADER, LL_INFO1000, "GENERICS: Replaced approximate parent %s with exact parent %s from token %x\n", pParentMT->GetDebugClassName(), pNewParentMT->GetDebugClassName(), crExtends));
// SetParentMethodTable is not used here since we want to update the indirection cell in the NGen case
- *EnsureWritablePages(pMT->GetParentMethodTablePtr()) = pNewParentMT;
+ if (pMT->GetParentMethodTablePlainOrRelativePointerPtr()->IsIndirectPtrMaybeNull())
+ {
+ *EnsureWritablePages(pMT->GetParentMethodTablePlainOrRelativePointerPtr()->GetValuePtr()) = pNewParentMT;
+ }
+ else
+ {
+ EnsureWritablePages(pMT->GetParentMethodTablePlainOrRelativePointerPtr());
+ pMT->GetParentMethodTablePlainOrRelativePointerPtr()->SetValueMaybeNull(pNewParentMT);
+ }
pParentMT = pNewParentMT;
}
@@ -908,8 +910,11 @@ ClassLoader::LoadExactParentAndInterfacesTransitively(MethodTable *pMT)
DWORD nDicts = pParentMT->GetNumDicts();
for (DWORD iDict = 0; iDict < nDicts; iDict++)
{
- if (pMT->GetPerInstInfo()[iDict] != pParentMT->GetPerInstInfo()[iDict])
- *EnsureWritablePages(&pMT->GetPerInstInfo()[iDict]) = pParentMT->GetPerInstInfo()[iDict];
+ if (pMT->GetPerInstInfo()[iDict].GetValueMaybeNull() != pParentMT->GetPerInstInfo()[iDict].GetValueMaybeNull())
+ {
+ EnsureWritablePages(&pMT->GetPerInstInfo()[iDict]);
+ pMT->GetPerInstInfo()[iDict].SetValueMaybeNull(pParentMT->GetPerInstInfo()[iDict].GetValueMaybeNull());
+ }
}
}
@@ -1626,9 +1631,21 @@ MethodDesc* MethodTable::GetExistingUnboxedEntryPointMD(MethodDesc *pMD)
);
}
-#endif // !DACCESS_COMPILE
+#endif // !DACCESS_COMPILE
+
+//*******************************************************************************
+#if !defined(FEATURE_HFA)
+bool MethodTable::IsHFA()
+{
+ LIMITED_METHOD_CONTRACT;
+#ifdef DACCESS_COMPILE
+ return false;
+#else
+ return GetClass()->CheckForHFA();
+#endif
+}
+#endif // !FEATURE_HFA
-#ifdef FEATURE_HFA
//*******************************************************************************
CorElementType MethodTable::GetHFAType()
{
@@ -1683,6 +1700,214 @@ CorElementType MethodTable::GetNativeHFAType()
LIMITED_METHOD_CONTRACT;
return HasLayout() ? GetLayoutInfo()->GetNativeHFAType() : GetHFAType();
}
+
+//---------------------------------------------------------------------------------------
+//
+// When FEATURE_HFA is defined, we cache the value; otherwise we recompute it with each
+// call. The latter is only for the armaltjit and the arm64altjit.
+bool
+#if defined(FEATURE_HFA)
+EEClass::CheckForHFA(MethodTable ** pByValueClassCache)
+#else
+EEClass::CheckForHFA()
+#endif
+{
+ STANDARD_VM_CONTRACT;
+
+ // This method should be called for valuetypes only
+ _ASSERTE(GetMethodTable()->IsValueType());
+
+ // No HFAs with explicit layout. There may be cases where explicit layout may still be
+ // eligible for HFA, but it is hard to tell the real intent. Make it simple and just
+ // unconditionally disable HFAs for explicit layout.
+ if (HasExplicitFieldOffsetLayout())
+ return false;
+
+ CorElementType hfaType = ELEMENT_TYPE_END;
+
+ FieldDesc *pFieldDescList = GetFieldDescList();
+ for (UINT i = 0; i < GetNumInstanceFields(); i++)
+ {
+ FieldDesc *pFD = &pFieldDescList[i];
+ CorElementType fieldType = pFD->GetFieldType();
+
+ switch (fieldType)
+ {
+ case ELEMENT_TYPE_VALUETYPE:
+#if defined(FEATURE_HFA)
+ fieldType = pByValueClassCache[i]->GetHFAType();
+#else
+ fieldType = pFD->LookupApproxFieldTypeHandle().AsMethodTable()->GetHFAType();
+#endif
+ break;
+
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ break;
+
+ default:
+ // Not HFA
+ return false;
+ }
+
+ // Field type should be a valid HFA type.
+ if (fieldType == ELEMENT_TYPE_END)
+ {
+ return false;
+ }
+
+ // Initialize with a valid HFA type.
+ if (hfaType == ELEMENT_TYPE_END)
+ {
+ hfaType = fieldType;
+ }
+ // All field types should be equal.
+ else if (fieldType != hfaType)
+ {
+ return false;
+ }
+ }
+
+ if (hfaType == ELEMENT_TYPE_END)
+ return false;
+
+ int elemSize = (hfaType == ELEMENT_TYPE_R8) ? sizeof(double) : sizeof(float);
+
+ // Note that we check the total size, but do not perform any checks on number of fields:
+ // - Type of fields can be HFA valuetype itself
+ // - Managed C++ HFA valuetypes have just one <alignment member> of type float to signal that
+ // the valuetype is an HFA, plus an explicitly specified size
+
+ DWORD totalSize = GetMethodTable()->GetNumInstanceFieldBytes();
+
+ if (totalSize % elemSize != 0)
+ return false;
+
+ // On ARM, HFAs can have a maximum of four fields regardless of whether those are float or double.
+ if (totalSize / elemSize > 4)
+ return false;
+
+ // All the above tests passed. It's HFA!
+#if defined(FEATURE_HFA)
+ GetMethodTable()->SetIsHFA();
+#endif
+ return true;
+}
+
+CorElementType EEClassLayoutInfo::GetNativeHFATypeRaw()
+{
+ UINT numReferenceFields = GetNumCTMFields();
+
+ CorElementType hfaType = ELEMENT_TYPE_END;
+
+#ifndef DACCESS_COMPILE
+ const FieldMarshaler *pFieldMarshaler = GetFieldMarshalers();
+ while (numReferenceFields--)
+ {
+ CorElementType fieldType = ELEMENT_TYPE_END;
+
+ switch (pFieldMarshaler->GetNStructFieldType())
+ {
+ case NFT_COPY4:
+ case NFT_COPY8:
+ fieldType = pFieldMarshaler->GetFieldDesc()->GetFieldType();
+ if (fieldType != ELEMENT_TYPE_R4 && fieldType != ELEMENT_TYPE_R8)
+ return ELEMENT_TYPE_END;
+ break;
+
+ case NFT_NESTEDLAYOUTCLASS:
+ fieldType = ((FieldMarshaler_NestedLayoutClass *)pFieldMarshaler)->GetMethodTable()->GetNativeHFAType();
+ break;
+
+ case NFT_NESTEDVALUECLASS:
+ fieldType = ((FieldMarshaler_NestedValueClass *)pFieldMarshaler)->GetMethodTable()->GetNativeHFAType();
+ break;
+
+ case NFT_FIXEDARRAY:
+ fieldType = ((FieldMarshaler_FixedArray *)pFieldMarshaler)->GetElementTypeHandle().GetMethodTable()->GetNativeHFAType();
+ break;
+
+ case NFT_DATE:
+ fieldType = ELEMENT_TYPE_R8;
+ break;
+
+ default:
+ // Not HFA
+ return ELEMENT_TYPE_END;
+ }
+
+ // Field type should be a valid HFA type.
+ if (fieldType == ELEMENT_TYPE_END)
+ {
+ return ELEMENT_TYPE_END;
+ }
+
+ // Initialize with a valid HFA type.
+ if (hfaType == ELEMENT_TYPE_END)
+ {
+ hfaType = fieldType;
+ }
+ // All field types should be equal.
+ else if (fieldType != hfaType)
+ {
+ return ELEMENT_TYPE_END;
+ }
+
+ ((BYTE*&)pFieldMarshaler) += MAXFIELDMARSHALERSIZE;
+ }
+
+ if (hfaType == ELEMENT_TYPE_END)
+ return ELEMENT_TYPE_END;
+
+ int elemSize = (hfaType == ELEMENT_TYPE_R8) ? sizeof(double) : sizeof(float);
+
+ // Note that we check the total size, but do not perform any checks on number of fields:
+ // - Type of fields can be HFA valuetype itself
+ // - Managed C++ HFA valuetypes have just one <alignment member> of type float to signal that
+ // the valuetype is an HFA, plus an explicitly specified size
+
+ DWORD totalSize = GetNativeSize();
+
+ if (totalSize % elemSize != 0)
+ return ELEMENT_TYPE_END;
+
+ // On ARM, HFAs can have a maximum of four fields regardless of whether those are float or double.
+ if (totalSize / elemSize > 4)
+ return ELEMENT_TYPE_END;
+
+#endif // !DACCESS_COMPILE
+
+ return hfaType;
+}
+
+#ifdef FEATURE_HFA
+//
+// The managed and unmanaged views of the types can differ for non-blittable types. This method
+// mirrors the HFA type computation for the unmanaged view.
+//
+VOID EEClass::CheckForNativeHFA()
+{
+ STANDARD_VM_CONTRACT;
+
+ // No HFAs with inheritance
+ if (!(GetMethodTable()->IsValueType() || (GetMethodTable()->GetParentMethodTable() == g_pObjectClass)))
+ return;
+
+ // No HFAs with explicit layout. There may be cases where explicit layout may still be
+ // eligible for HFA, but it is hard to tell the real intent. Make it simple and just
+ // unconditionally disable HFAs for explicit layout.
+ if (HasExplicitFieldOffsetLayout())
+ return;
+
+ CorElementType hfaType = GetLayoutInfo()->GetNativeHFATypeRaw();
+ if (hfaType == ELEMENT_TYPE_END)
+ {
+ return;
+ }
+
+ // All the above tests passed. It's HFA!
+ GetLayoutInfo()->SetNativeHFAType(hfaType);
+}
#endif // FEATURE_HFA
#ifdef FEATURE_64BIT_ALIGNMENT
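
A worked instance of the size checks in CheckForHFA/GetNativeHFATypeRaw above: for a struct of three doubles, hfaType is ELEMENT_TYPE_R8, elemSize is 8 and totalSize is 24; 24 % 8 == 0 and 24 / 8 == 3 <= 4, so the type qualifies. A sketch of just that final arithmetic (illustrative, not the runtime's code):

// Final HFA size test: the struct must be a whole number of elements,
// and at most four of them (the ARM/ARM64 HFA limit).
static bool PassesHfaSizeCheck(unsigned totalSize, unsigned elemSize)
{
    return (totalSize % elemSize == 0) && (totalSize / elemSize <= 4);
}
// PassesHfaSizeCheck(24, 8) == true   (e.g. three doubles)
// PassesHfaSizeCheck(20, 8) == false  (not a whole number of doubles)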
diff --git a/src/vm/class.h b/src/vm/class.h
index 60cab67707..8bbbf55b9b 100644
--- a/src/vm/class.h
+++ b/src/vm/class.h
@@ -2,19 +2,11 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-// ==++==
-//
-//
-
-//
// ==--==
//
// File: CLASS.H
//
-
-//
-
//
// NOTE: Even though EEClass is considered to contain cold data (relative to MethodTable), these data
// structures *are* touched (especially during startup as part of soft-binding). As a result, and given the
@@ -526,6 +518,7 @@ class EEClassLayoutInfo
}
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
+ CorElementType GetNativeHFATypeRaw();
#ifdef FEATURE_HFA
bool IsNativeHFA()
{
@@ -540,7 +533,16 @@ class EEClassLayoutInfo
return (m_bFlags & e_R4_HFA) ? ELEMENT_TYPE_R4 : ELEMENT_TYPE_R8;
return ELEMENT_TYPE_END;
}
-#endif
+#else // !FEATURE_HFA
+ bool IsNativeHFA()
+ {
+ return GetNativeHFATypeRaw() != ELEMENT_TYPE_END;
+ }
+ CorElementType GetNativeHFAType()
+ {
+ return GetNativeHFATypeRaw();
+ }
+#endif // !FEATURE_HFA
private:
void SetIsBlittable(BOOL isBlittable)
@@ -1300,77 +1302,6 @@ public:
}
#endif
- inline BOOL IsCritical()
- {
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(HasCriticalTransparentInfo());
- return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) != VMFLAG_TRANSPARENCY_TRANSPARENT
- && !IsAllTransparent();
- }
-
- inline BOOL IsTreatAsSafe()
- {
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(HasCriticalTransparentInfo());
- return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_ALLCRITICAL_TAS ||
- (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_TAS_NOTCRITICAL
- ;
- }
-
- inline BOOL IsAllTransparent()
- {
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(HasCriticalTransparentInfo());
- return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_ALL_TRANSPARENT;
- }
-
- inline BOOL IsAllCritical()
- {
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(HasCriticalTransparentInfo());
- return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_ALLCRITICAL
- || (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) == VMFLAG_TRANSPARENCY_ALLCRITICAL_TAS;
- }
-
- inline BOOL HasCriticalTransparentInfo()
- {
- LIMITED_METHOD_CONTRACT;
- return (m_VMFlags & VMFLAG_TRANSPARENCY_MASK) != VMFLAG_TRANSPARENCY_UNKNOWN;
- }
-
- void SetCriticalTransparentInfo(
- BOOL fIsTreatAsSafe,
- BOOL fIsAllTransparent,
- BOOL fIsAllCritical)
- {
- WRAPPER_NO_CONTRACT;
-
- // TAS wihtout critical doesn't make sense - although it was allowed in the v2 desktop model,
- // so we need to allow it for compatibility reasons on the desktop.
- _ASSERTE(!fIsTreatAsSafe || fIsAllCritical);
-
- //if nothing is set, then we're transparent.
- unsigned flags = VMFLAG_TRANSPARENCY_TRANSPARENT;
-
- if (fIsAllTransparent)
- {
- flags = VMFLAG_TRANSPARENCY_ALL_TRANSPARENT;
- }
- else if (fIsAllCritical)
- {
- flags = fIsTreatAsSafe ? VMFLAG_TRANSPARENCY_ALLCRITICAL_TAS :
- VMFLAG_TRANSPARENCY_ALLCRITICAL;
- }
- else
- {
- flags = fIsTreatAsSafe ? VMFLAG_TRANSPARENCY_TAS_NOTCRITICAL :
- VMFLAG_TRANSPARENCY_TRANSPARENT;
- }
-
- FastInterlockOr(EnsureWritablePages(&m_VMFlags), flags);
-
- _ASSERTE(HasCriticalTransparentInfo());
- }
inline DWORD IsUnsafeValueClass()
{
LIMITED_METHOD_CONTRACT;
@@ -1398,29 +1329,6 @@ public:
}
public:
-
- inline void SetDoesNotHaveSuppressUnmanagedCodeAccessAttr()
- {
- WRAPPER_NO_CONTRACT;
- FastInterlockOr(EnsureWritablePages(&m_VMFlags),VMFLAG_NOSUPPRESSUNMGDCODEACCESS);
- }
-
- inline BOOL HasSuppressUnmanagedCodeAccessAttr()
- {
- LIMITED_METHOD_CONTRACT;
- return !(m_VMFlags & VMFLAG_NOSUPPRESSUNMGDCODEACCESS);
- }
-
- inline BOOL HasRemotingProxyAttribute()
- {
- LIMITED_METHOD_CONTRACT;
- return m_VMFlags & VMFLAG_REMOTING_PROXY_ATTRIBUTE;
- }
- inline void SetHasRemotingProxyAttribute()
- {
- LIMITED_METHOD_CONTRACT;
- m_VMFlags |= (DWORD)VMFLAG_REMOTING_PROXY_ATTRIBUTE;
- }
inline BOOL IsAlign8Candidate()
{
LIMITED_METHOD_CONTRACT;
@@ -1519,11 +1427,6 @@ public:
m_VMFlags |= VMFLAG_DELEGATE;
}
- // This is only applicable to interfaces. This method does not
- // provide correct information for non-interface types.
- DWORD SomeMethodsRequireInheritanceCheck();
- void SetSomeMethodsRequireInheritanceCheck();
-
BOOL HasFixedAddressVTStatics()
{
LIMITED_METHOD_CONTRACT;
@@ -1725,6 +1628,13 @@ public:
}
#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
+#if defined(FEATURE_HFA)
+ bool CheckForHFA(MethodTable ** pByValueClassCache);
+ VOID CheckForNativeHFA();
+#else // !FEATURE_HFA
+ bool CheckForHFA();
+#endif // FEATURE_HFA
+
#ifdef FEATURE_COMINTEROP
inline TypeHandle GetCoClassForInterface()
{
@@ -2014,35 +1924,7 @@ public:
#endif
VMFLAG_DELEGATE = 0x00000002,
- //Desktop
- // --------------
- //Flag | All Transparent | Critical | All Critical | TreatAsSafe
- //TRANSPARENT | 0 | 0 | 0 | 0
- //ALL_TRANSPARENT | 1 | 0 | 0 | 0
- //CRITICAL | 0 | 1 | 0 | 0
- //TAS_CRITICAL | 0 | 1 | 0 | 1
- //ALLCRITICAL | 0 | 0 | 1 | 0
- //ALLCRITICAL_TAS | 0 | 0 | 1 | 1
- //TAS_NOTCRITICAL | 0 | 0 | 0 | 1
- //
- //
- //On CoreCLR TAS implies Critical and "All Critical" and "Critical" are the same thing.
- //CoreCLR
- // --------------
- //Flag | All Transparent | Critical | TreatAsSafe
- //TRANSPARENT | 0 | 0 | 0
- //ALL_TRANSPARENT | 1 | 0 | 0
- //CRITICAL | 0 | 1 | 0
- //TAS_CRITICAL | 0 | 1 | 1
- VMFLAG_TRANSPARENCY_MASK = 0x0000001c,
- VMFLAG_TRANSPARENCY_UNKNOWN = 0x00000000,
- VMFLAG_TRANSPARENCY_TRANSPARENT = 0x00000004,
- VMFLAG_TRANSPARENCY_ALL_TRANSPARENT = 0x00000008,
- VMFLAG_TRANSPARENCY_CRITICAL = 0x0000000c,
- VMFLAG_TRANSPARENCY_CRITICAL_TAS = 0x00000010,
- VMFLAG_TRANSPARENCY_ALLCRITICAL = 0x00000014,
- VMFLAG_TRANSPARENCY_ALLCRITICAL_TAS = 0x00000018,
- VMFLAG_TRANSPARENCY_TAS_NOTCRITICAL = 0x0000001c,
+ // VMFLAG_UNUSED = 0x0000001c,
VMFLAG_FIXED_ADDRESS_VT_STATICS = 0x00000020, // Value type Statics in this class will be pinned
VMFLAG_HASLAYOUT = 0x00000040,
@@ -2068,13 +1950,13 @@ public:
VMFLAG_BESTFITMAPPING = 0x00004000, // BestFitMappingAttribute.Value
VMFLAG_THROWONUNMAPPABLECHAR = 0x00008000, // BestFitMappingAttribute.ThrowOnUnmappableChar
- VMFLAG_NOSUPPRESSUNMGDCODEACCESS = 0x00010000,
+ // unused = 0x00010000,
VMFLAG_NO_GUID = 0x00020000,
VMFLAG_HASNONPUBLICFIELDS = 0x00040000,
- VMFLAG_REMOTING_PROXY_ATTRIBUTE = 0x00080000,
+ // unused = 0x00080000,
VMFLAG_CONTAINS_STACK_PTR = 0x00100000,
VMFLAG_PREFER_ALIGN8 = 0x00200000, // Would like to have 8-byte alignment
- VMFLAG_METHODS_REQUIRE_INHERITANCE_CHECKS = 0x00400000,
+ // unused = 0x00400000,
#ifdef FEATURE_COMINTEROP
VMFLAG_SPARSE_FOR_COMINTEROP = 0x00800000,
diff --git a/src/vm/class.inl b/src/vm/class.inl
index 78e05cdd14..d411f817d2 100644
--- a/src/vm/class.inl
+++ b/src/vm/class.inl
@@ -5,12 +5,6 @@
// File: CLASS.INL
//
-
-//
-
-//
-// ============================================================================
-
#ifndef _CLASS_INL_
#define _CLASS_INL_
//***************************************************************************************
@@ -20,18 +14,6 @@ inline PTR_MethodDescChunk EEClass::GetChunks()
return m_pChunks.GetValueMaybeNull(PTR_HOST_MEMBER_TADDR(EEClass, this, m_pChunks));
}
-//***************************************************************************************
-inline DWORD EEClass::SomeMethodsRequireInheritanceCheck()
-{
- return (m_VMFlags & VMFLAG_METHODS_REQUIRE_INHERITANCE_CHECKS);
-}
-
-//***************************************************************************************
-inline void EEClass::SetSomeMethodsRequireInheritanceCheck()
-{
- m_VMFlags = m_VMFlags | VMFLAG_METHODS_REQUIRE_INHERITANCE_CHECKS;
-}
-
//*******************************************************************************
#ifndef DACCESS_COMPILE
// Set default values for optional fields.
diff --git a/src/vm/classcompat.cpp b/src/vm/classcompat.cpp
index 031604bc8e..fb97a79e85 100644
--- a/src/vm/classcompat.cpp
+++ b/src/vm/classcompat.cpp
@@ -31,7 +31,6 @@
#include "fieldmarshaler.h"
#include "cgensys.h"
#include "gcheaputilities.h"
-#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
#include "sigformat.h"
@@ -54,7 +53,6 @@
#include "clrtocomcall.h"
#include "runtimecallablewrapper.h"
-#include "listlock.inl"
#include "generics.h"
#include "contractimpl.h"
@@ -1308,11 +1306,6 @@ VOID MethodTableBuilder::BuildInteropVTable_PlaceVtableMethods(
// The interface we are attempting to place
MethodTable *pInterface = pCurItfInfo->m_pMethodTable;
- _ASSERTE(!(pCurItfInfo->IsDeclaredOnClass() &&
- !pInterface->IsExternallyVisible() &&
- pInterface->GetAssembly() != bmtType->pModule->GetAssembly() &&
- !Security::CanSkipVerification(GetAssembly()->GetDomainAssembly())));
-
// Did we place this interface already due to the parent class's interface placement?
if (pCurItfInfo->GetInteropStartSlot() != MethodTable::NO_SLOT)
{
diff --git a/src/vm/classnames.h b/src/vm/classnames.h
index 0c24914a56..fc087372c8 100644
--- a/src/vm/classnames.h
+++ b/src/vm/classnames.h
@@ -145,6 +145,7 @@
#define g_CompilerServicesFixedAddressValueTypeAttribute "System.Runtime.CompilerServices.FixedAddressValueTypeAttribute"
#define g_CompilerServicesUnsafeValueTypeAttribute "System.Runtime.CompilerServices.UnsafeValueTypeAttribute"
+#define g_CompilerServicesIntrinsicAttribute "System.Runtime.CompilerServices.IntrinsicAttribute"
#define g_UnmanagedFunctionPointerAttribute "System.Runtime.InteropServices.UnmanagedFunctionPointerAttribute"
#define g_DefaultDllImportSearchPathsAttribute "System.Runtime.InteropServices.DefaultDllImportSearchPathsAttribute"
#define g_NativeCallableAttribute "System.Runtime.InteropServices.NativeCallableAttribute"
diff --git a/src/vm/clrex.cpp b/src/vm/clrex.cpp
index ba040b7e81..3b21d649d6 100644
--- a/src/vm/clrex.cpp
+++ b/src/vm/clrex.cpp
@@ -1224,7 +1224,7 @@ OBJECTREF EEException::CreateThrowable()
#endif
}
-RuntimeExceptionKind EEException::GetKindFromHR(HRESULT hr, bool fIsWinRtMode)
+RuntimeExceptionKind EEException::GetKindFromHR(HRESULT hr, bool fIsWinRtMode /*= false*/)
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/clrex.h b/src/vm/clrex.h
index ce55ebcefa..12eb702be1 100644
--- a/src/vm/clrex.h
+++ b/src/vm/clrex.h
@@ -1095,7 +1095,7 @@ inline EEMessageException::EEMessageException(HRESULT hr)
}
inline EEMessageException::EEMessageException(HRESULT hr, bool fUseCOMException)
- : EEException(GetKindFromHR(hr, fUseCOMException)),
+ : EEException(GetKindFromHR(hr, !fUseCOMException)),
m_hr(hr),
m_resID(0)
{
diff --git a/src/vm/clsload.cpp b/src/vm/clsload.cpp
index da48549e7b..22b030caa0 100644
--- a/src/vm/clsload.cpp
+++ b/src/vm/clsload.cpp
@@ -23,7 +23,6 @@
#include "comsynchronizable.h"
#include "threads.h"
#include "dllimport.h"
-#include "security.h"
#include "dbginterface.h"
#include "log.h"
#include "eeconfig.h"
@@ -4869,27 +4868,6 @@ StaticAccessCheckContext::StaticAccessCheckContext(MethodDesc* pCallerMethod, Me
m_pCallerAssembly = pCallerType->GetAssembly();
}
-// Critical callers do not need the extra access checks
-bool StaticAccessCheckContext::IsCallerCritical()
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- if (m_pCallerMethod == NULL || !Security::IsMethodTransparent(m_pCallerMethod))
- {
- return true;
- }
-
- return false;
-}
-
-
-
//******************************************************************************
// static
@@ -4911,8 +4889,7 @@ void AccessCheckOptions::Startup()
//******************************************************************************
AccessCheckOptions::AccessCheckOptions(
const AccessCheckOptions & templateOptions,
- BOOL throwIfTargetIsInaccessible,
- BOOL skipCheckForCriticalCode /*=FALSE*/) :
+ BOOL throwIfTargetIsInaccessible) :
m_pAccessContext(templateOptions.m_pAccessContext)
{
WRAPPER_NO_CONTRACT;
@@ -4922,8 +4899,7 @@ AccessCheckOptions::AccessCheckOptions(
throwIfTargetIsInaccessible,
templateOptions.m_pTargetMT,
templateOptions.m_pTargetMethod,
- templateOptions.m_pTargetField,
- skipCheckForCriticalCode);
+ templateOptions.m_pTargetField);
}
//******************************************************************************
@@ -4978,36 +4954,15 @@ BOOL AccessCheckOptions::DemandMemberAccess(AccessCheckContext *pContext, Method
// classes/members in app code.
if (m_accessCheckType != kMemberAccess && pTargetMT)
{
- if (visibilityCheck && Security::IsTransparencyEnforcementEnabled())
- {
- // In CoreCLR RMA means visibility checks always succeed if the target is user code.
- if (m_accessCheckType == kRestrictedMemberAccess || m_accessCheckType == kRestrictedMemberAccessNoTransparency)
- return TRUE;
-
- // Accessing private types/members in platform code.
- fAccessingFrameworkCode = TRUE;
- }
- else
- {
- // We allow all transparency checks to succeed in LCG methods and reflection invocation.
- if (m_accessCheckType == kNormalAccessNoTransparency || m_accessCheckType == kRestrictedMemberAccessNoTransparency)
- return TRUE;
- }
+ // We allow all transparency checks to succeed in LCG methods and reflection invocation.
+ if (m_accessCheckType == kNormalAccessNoTransparency || m_accessCheckType == kRestrictedMemberAccessNoTransparency)
+ return TRUE;
}
// Always allow interop (NULL) callers full access.
if (pContext->IsCalledFromInterop())
return TRUE;
- MethodDesc* pCallerMD = pContext->GetCallerMethod();
-
- // critical code is exempted from all accessibility rules, regardless of the AccessCheckType.
- if (pCallerMD != NULL &&
- !Security::IsMethodTransparent(pCallerMD))
- {
- return TRUE;
- }
-
// No Access
if (m_fThrowIfTargetIsInaccessible)
{
@@ -5090,15 +5045,6 @@ BOOL AccessCheckOptions::DemandMemberAccessOrFail(AccessCheckContext *pContext,
}
CONTRACTL_END;
- // m_fSkipCheckForCriticalCode is only ever set to true for CanAccessMemberForExtraChecks.
- // For legacy compat we allow the access check to succeed for all AccessCheckType if the caller is critical.
- if (m_fSkipCheckForCriticalCode)
- {
- if (pContext->IsCalledFromInterop() ||
- !Security::IsMethodTransparent(pContext->GetCallerMethod()))
- return TRUE;
- }
-
if (DoNormalAccessibilityChecks())
{
if (pContext->GetCallerAssembly()->IgnoresAccessChecksTo(pTargetMT->GetAssembly()))
@@ -5131,15 +5077,6 @@ BOOL AccessCheckOptions::FailOrThrow(AccessCheckContext *pContext) const
}
CONTRACTL_END;
- // m_fSkipCheckForCriticalCode is only ever set to true for CanAccessMemberForExtraChecks.
- // For legacy compat we allow the access check to succeed for all AccessCheckType if the caller is critical.
- if (m_fSkipCheckForCriticalCode)
- {
- if (pContext->IsCalledFromInterop() ||
- !Security::IsMethodTransparent(pContext->GetCallerMethod()))
- return TRUE;
- }
-
if (m_fThrowIfTargetIsInaccessible)
{
ThrowAccessException(pContext);
@@ -5151,7 +5088,6 @@ BOOL AccessCheckOptions::FailOrThrow(AccessCheckContext *pContext) const
// Generate access exception context strings that are due to potential security misconfiguration
void GetAccessExceptionAdditionalContextForSecurity(Assembly *pAccessingAssembly,
Assembly *pTargetAssembly,
- BOOL isTransparencyError,
BOOL fAccessingFrameworkCode,
StringArrayList *pContextInformation)
{
@@ -5182,7 +5118,6 @@ void GetAccessExceptionAdditionalContextForSecurity(Assembly *pAccessingAssembly
// context is available, then this returns SString.Empty.
SString GetAdditionalAccessExceptionContext(Assembly *pAccessingAssembly,
Assembly *pTargetAssembly,
- BOOL isTransparencyError,
BOOL fAccessingFrameworkCode)
{
CONTRACTL
@@ -5200,7 +5135,6 @@ SString GetAdditionalAccessExceptionContext(Assembly *pAccessingAssembly,
// See if the exception may have been caused by security
GetAccessExceptionAdditionalContextForSecurity(pAccessingAssembly,
pTargetAssembly,
- isTransparencyError,
fAccessingFrameworkCode,
&contextComponents);
@@ -5236,15 +5170,10 @@ void DECLSPEC_NORETURN ThrowFieldAccessException(AccessCheckContext* pContext,
}
CONTRACTL_END;
- BOOL isTransparencyError = FALSE;
-
MethodDesc* pCallerMD = pContext->GetCallerMethod();
- if (pCallerMD != NULL)
- isTransparencyError = !Security::CheckCriticalAccess(pContext, NULL, pFD, NULL);
ThrowFieldAccessException(pCallerMD,
pFD,
- isTransparencyError,
messageID,
pInnerException,
fAccessingFrameworkCode);
@@ -5252,7 +5181,6 @@ void DECLSPEC_NORETURN ThrowFieldAccessException(AccessCheckContext* pContext,
void DECLSPEC_NORETURN ThrowFieldAccessException(MethodDesc* pCallerMD,
FieldDesc *pFD,
- BOOL isTransparencyError,
UINT messageID /* = 0 */,
Exception *pInnerException /* = NULL */,
BOOL fAccessingFrameworkCode /* = FALSE */)
@@ -5271,22 +5199,11 @@ void DECLSPEC_NORETURN ThrowFieldAccessException(MethodDesc* pCallerMD,
{
if (messageID == 0)
{
- // Figure out if we can give a specific reason why this field access was rejected - for instance, if
- // we see that the caller is transparent and accessing a critical field, then we can put that
- // information into the exception message.
- if (isTransparencyError)
- {
- messageID = IDS_E_CRITICAL_FIELD_ACCESS_DENIED;
- }
- else
- {
- messageID = IDS_E_FIELDACCESS;
- }
+ messageID = IDS_E_FIELDACCESS;
}
SString strAdditionalContext = GetAdditionalAccessExceptionContext(pCallerMD->GetAssembly(),
pFD->GetApproxEnclosingMethodTable()->GetAssembly(),
- isTransparencyError,
fAccessingFrameworkCode);
EX_THROW_WITH_INNER(EEFieldException, (pFD, pCallerMD, strAdditionalContext, messageID), pInnerException);
@@ -5313,15 +5230,10 @@ void DECLSPEC_NORETURN ThrowMethodAccessException(AccessCheckContext* pContext,
}
CONTRACTL_END;
- BOOL isTransparencyError = FALSE;
-
MethodDesc* pCallerMD = pContext->GetCallerMethod();
- if (pCallerMD != NULL)
- isTransparencyError = !Security::CheckCriticalAccess(pContext, pCalleeMD, NULL, NULL);
ThrowMethodAccessException(pCallerMD,
pCalleeMD,
- isTransparencyError,
messageID,
pInnerException,
fAccessingFrameworkCode);
@@ -5329,7 +5241,6 @@ void DECLSPEC_NORETURN ThrowMethodAccessException(AccessCheckContext* pContext,
void DECLSPEC_NORETURN ThrowMethodAccessException(MethodDesc* pCallerMD,
MethodDesc *pCalleeMD,
- BOOL isTransparencyError,
UINT messageID /* = 0 */,
Exception *pInnerException /* = NULL */,
BOOL fAccessingFrameworkCode /* = FALSE */)
@@ -5348,22 +5259,11 @@ void DECLSPEC_NORETURN ThrowMethodAccessException(MethodDesc* pCallerMD,
{
if (messageID == 0)
{
- // Figure out if we can give a specific reason why this method access was rejected - for instance, if
- // we see that the caller is transparent and the callee is critical, then we can put that
- // information into the exception message.
- if (isTransparencyError)
- {
- messageID = IDS_E_CRITICAL_METHOD_ACCESS_DENIED;
- }
- else
- {
- messageID = IDS_E_METHODACCESS;
- }
+ messageID = IDS_E_METHODACCESS;
}
SString strAdditionalContext = GetAdditionalAccessExceptionContext(pCallerMD->GetAssembly(),
pCalleeMD->GetAssembly(),
- isTransparencyError,
fAccessingFrameworkCode);
EX_THROW_WITH_INNER(EEMethodException, (pCalleeMD, pCallerMD, strAdditionalContext, messageID), pInnerException);
@@ -5390,15 +5290,10 @@ void DECLSPEC_NORETURN ThrowTypeAccessException(AccessCheckContext* pContext,
}
CONTRACTL_END;
- BOOL isTransparencyError = FALSE;
-
MethodDesc* pCallerMD = pContext->GetCallerMethod();
- if (pCallerMD != NULL)
- isTransparencyError = !Security::CheckCriticalAccess(pContext, NULL, NULL, pMT);
ThrowTypeAccessException(pCallerMD,
pMT,
- isTransparencyError,
messageID,
pInnerException,
fAccessingFrameworkCode);
@@ -5406,7 +5301,6 @@ void DECLSPEC_NORETURN ThrowTypeAccessException(AccessCheckContext* pContext,
void DECLSPEC_NORETURN ThrowTypeAccessException(MethodDesc* pCallerMD,
MethodTable *pMT,
- BOOL isTransparencyError,
UINT messageID /* = 0 */,
Exception *pInnerException /* = NULL */,
BOOL fAccessingFrameworkCode /* = FALSE */)
@@ -5425,22 +5319,11 @@ void DECLSPEC_NORETURN ThrowTypeAccessException(MethodDesc* pCallerMD,
{
if (messageID == 0)
{
- // Figure out if we can give a specific reason why this type access was rejected - for instance, if
- // we see that the caller is transparent and is accessing a critical type, then we can put that
- // information into the exception message.
- if (isTransparencyError)
- {
- messageID = IDS_E_CRITICAL_TYPE_ACCESS_DENIED;
- }
- else
- {
- messageID = IDS_E_TYPEACCESS;
- }
+ messageID = IDS_E_TYPEACCESS;
}
SString strAdditionalContext = GetAdditionalAccessExceptionContext(pCallerMD->GetAssembly(),
pMT->GetAssembly(),
- isTransparencyError,
fAccessingFrameworkCode);
EX_THROW_WITH_INNER(EETypeAccessException, (pMT, pCallerMD, strAdditionalContext, messageID), pInnerException);
@@ -5451,51 +5334,6 @@ void DECLSPEC_NORETURN ThrowTypeAccessException(MethodDesc* pCallerMD,
}
}
-//******************************************************************************
-// This function determines whether a method [if transparent]
-// can access a specified target (e.g. Type, Method, Field)
-static BOOL CheckTransparentAccessToCriticalCode(
- AccessCheckContext* pContext,
- DWORD dwMemberAccess,
- MethodTable* pTargetMT,
- MethodDesc* pOptionalTargetMethod,
- FieldDesc* pOptionalTargetField,
- MethodTable* pOptionalTargetType,
- const AccessCheckOptions & accessCheckOptions)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- PRECONDITION(CheckPointer(pContext));
- PRECONDITION(accessCheckOptions.TransparencyCheckNeeded());
- }
- CONTRACTL_END;
-
- if (!Security::IsTransparencyEnforcementEnabled())
- return TRUE;
-
- // At most one of these should be non-NULL
- _ASSERTE(1 >= ((pOptionalTargetMethod ? 1 : 0) +
- (pOptionalTargetField ? 1 : 0) +
- (pOptionalTargetType ? 1 : 0)));
-
-
- // if the caller [Method] is transparent, do special security checks
- // check if security disallows access to target member
- if (!Security::CheckCriticalAccess(
- pContext,
- pOptionalTargetMethod,
- pOptionalTargetField,
- pOptionalTargetType))
- {
- return accessCheckOptions.DemandMemberAccessOrFail(pContext, pTargetMT, FALSE /*visibilityCheck*/);
- }
-
- return TRUE;
-} // static BOOL CheckTransparentAccessToCriticalCode
-
//---------------------------------------------------------------------------------------
//
// Checks to see if access to a member with assembly visibility is allowed.
@@ -5620,8 +5458,7 @@ BOOL ClassLoader::CanAccessClass( // True if access is legal,
AccessCheckContext* pContext, // The caller context
MethodTable* pTargetClass, // The desired target class.
Assembly* pTargetAssembly, // Assembly containing the target class.
- const AccessCheckOptions & accessCheckOptions,
- BOOL checkTargetTypeTransparency)// = TRUE
+    const AccessCheckOptions & accessCheckOptions)
{
CONTRACTL
{
@@ -5639,26 +5476,6 @@ BOOL ClassLoader::CanAccessClass( // True if access is legal,
//if (!pTargetClass)
// return TRUE;
- // check transparent/critical on type
- // Note that dwMemberAccess is of no use here since we don't have a target method yet. It really should be made an optional arg.
- // For now, we pass in mdPublic.
- if (checkTargetTypeTransparency && accessCheckOptions.TransparencyCheckNeeded())
- {
- if (!CheckTransparentAccessToCriticalCode(
- pContext,
- mdPublic,
- pTargetClass,
- NULL,
- NULL,
- pTargetClass,
- accessCheckOptions))
- {
- // no need to call accessCheckOptions.DemandMemberAccessOrFail here because
- // CheckTransparentAccessToCriticalCode does that already
- return FALSE;
- }
- }
-
// Step 2: Recursively call CanAccessClass on the generic type arguments
// Is the desired target a generic instantiation?
if (pTargetClass->HasInstantiation())
@@ -5679,8 +5496,7 @@ BOOL ClassLoader::CanAccessClass( // True if access is legal,
pContext,
pMT,
th.GetAssembly(),
- accessCheckOptions,
- checkTargetTypeTransparency))
+ accessCheckOptions))
{
// no need to call accessCheckOptions.DemandMemberAccessOrFail here because the base case in
// CanAccessClass does that already
@@ -5780,23 +5596,14 @@ BOOL ClassLoader::CanAccessClass( // True if access is legal,
dwProtection,
NULL,
NULL,
- accessCheckOptions,
- FALSE,
- FALSE);
+ accessCheckOptions);
} // BOOL ClassLoader::CanAccessClass()
//******************************************************************************
// This is a front-end to CheckAccessMember that handles the nested class scope. If can't access
// from the current point and are a nested class, then try from the enclosing class.
-// It does two things in addition to CanAccessMember:
-// 1. If the caller class doesn't have access to the caller, see if the enclosing class does.
-// 2. CanAccessMemberForExtraChecks which checks whether the caller class has access to
-// the signature of the target method or field.
+// In addition to CanAccessMember, if the caller class does not have access, this checks whether the enclosing class does.
//
-// checkTargetMethodTransparency is set to FALSE only when the check is for JIT-compilation
-// because the JIT has a mechanism to insert a callout for the case where
-// we need to perform the currentMD <-> TargetMD check at runtime.
-
/* static */
BOOL ClassLoader::CanAccess( // TRUE if access is allowed, FALSE otherwise.
AccessCheckContext* pContext, // The caller context
@@ -5806,9 +5613,7 @@ BOOL ClassLoader::CanAccess( // TRUE if access is all
MethodDesc* pOptionalTargetMethod, // The target method; NULL if the target is a not a method or
// there is no need to check the method's instantiation.
FieldDesc* pOptionalTargetField, // or The desired field; if NULL, return TRUE
- const AccessCheckOptions & accessCheckOptions, // = s_NormalAccessChecks
- BOOL checkTargetMethodTransparency, // = TRUE
- BOOL checkTargetTypeTransparency) // = TRUE
+ const AccessCheckOptions & accessCheckOptions) // = s_NormalAccessChecks
{
CONTRACT(BOOL)
{
@@ -5833,9 +5638,7 @@ BOOL ClassLoader::CanAccess( // TRUE if access is all
pOptionalTargetField,
// Suppress exceptions for nested classes since this is not a hard-failure,
// and we can do additional checks
- accessCheckOptionsNoThrow,
- checkTargetMethodTransparency,
- checkTargetTypeTransparency))
+ accessCheckOptionsNoThrow))
{
// If we're here, CheckAccessMember didn't allow access.
BOOL canAccess = FALSE;
@@ -5869,9 +5672,7 @@ BOOL ClassLoader::CanAccess( // TRUE if access is all
dwMemberAccess,
pOptionalTargetMethod,
pOptionalTargetField,
- accessCheckOptionsNoThrow,
- checkTargetMethodTransparency,
- checkTargetTypeTransparency);
+ accessCheckOptionsNoThrow);
}
if (!canAccess)
@@ -5881,212 +5682,12 @@ BOOL ClassLoader::CanAccess( // TRUE if access is all
}
}
- // For member access, we do additional checks to ensure that the specific member can
- // be accessed
-
- if (!CanAccessMemberForExtraChecks(
- pContext,
- pTargetMT,
- pOptionalTargetMethod,
- pOptionalTargetField,
- accessCheckOptions,
- checkTargetMethodTransparency))
- {
- RETURN_FROM_INTERIOR_PROBE(FALSE);
- }
-
RETURN_FROM_INTERIOR_PROBE(TRUE);
END_INTERIOR_STACK_PROBE;
} // BOOL ClassLoader::CanAccess()
//******************************************************************************
-// Performs additional checks for member access
-
-BOOL ClassLoader::CanAccessMemberForExtraChecks(
- AccessCheckContext* pContext,
- MethodTable* pTargetExactMT,
- MethodDesc* pOptionalTargetMethod,
- FieldDesc* pOptionalTargetField,
- const AccessCheckOptions & accessCheckOptions,
- BOOL checkTargetMethodTransparency)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- PRECONDITION(CheckPointer(pContext));
- }
- CONTRACTL_END;
-
- // Critical callers do not need the extra checks
- // This early-out saves the cost of all the subsequent work
- if (pContext->IsCallerCritical())
- {
- return TRUE;
- }
-
- if (pOptionalTargetMethod == NULL && pOptionalTargetField == NULL)
- return TRUE;
-
- _ASSERTE((pOptionalTargetMethod == NULL) != (pOptionalTargetField == NULL));
-
- // We should always do checks on member signatures. But for backward compatibility we skip this check
- // for critical callers. And since we don't want to look for the caller here which might incur a stack walk,
- // we delay the check to DemandMemberAccessOrFail time.
- AccessCheckOptions legacyAccessCheckOptions(accessCheckOptions, accessCheckOptions.Throws(), TRUE);
-
- if (pOptionalTargetMethod)
- {
- // A method is accessible only if all the types in the signature
- // are also accessible.
- if (!CanAccessSigForExtraChecks(pContext,
- pOptionalTargetMethod,
- pTargetExactMT,
- legacyAccessCheckOptions,
- checkTargetMethodTransparency))
- {
- return FALSE;
- }
- }
- else
- {
- _ASSERTE(pOptionalTargetField != NULL);
-
- // A field is accessible only if the field type is also accessible
-
- TypeHandle fieldType = pOptionalTargetField->GetExactFieldType(TypeHandle(pTargetExactMT));
- CorElementType fieldCorType = fieldType.GetSignatureCorElementType();
-
- MethodTable * pFieldTypeMT = fieldType.GetMethodTableOfElementType();
-
- // No access check needed on a generic variable or a function pointer
- if (pFieldTypeMT != NULL)
- {
- if (!CanAccessClassForExtraChecks(pContext,
- pFieldTypeMT,
- pFieldTypeMT->GetAssembly(),
- legacyAccessCheckOptions,
- TRUE))
- {
- return FALSE;
- }
- }
- }
-
- return TRUE;
-}
-
-//******************************************************************************
-// Can all the types in the signature of the pTargetMethodSig be accessed?
-//
-// "ForExtraChecks" means that we only do extra checks (security and transparency)
-// instead of the usual loader visibility checks. Post V2, we can enable all checks.
-
-BOOL ClassLoader::CanAccessSigForExtraChecks( // TRUE if access is allowed, FALSE otherwise.
- AccessCheckContext* pContext,
- MethodDesc* pTargetMethodSig, // The target method. If this is a shared method, pTargetExactMT gives
- // additional information about the exact type
- MethodTable* pTargetExactMT, // or The desired field; if NULL, return TRUE
- const AccessCheckOptions & accessCheckOptions,
- BOOL checkTargetTransparency)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- PRECONDITION(CheckPointer(pContext));
- }
- CONTRACTL_END;
-
- MetaSig sig(pTargetMethodSig, TypeHandle(pTargetExactMT));
-
- // First, check the return type
-
- TypeHandle retType = sig.GetRetTypeHandleThrowing();
- MethodTable * pRetMT = retType.GetMethodTableOfElementType();
-
- // No access check needed on a generic variable or a function pointer
- if (pRetMT != NULL)
- {
- if (!CanAccessClassForExtraChecks(pContext,
- pRetMT,
- retType.GetAssembly(),
- accessCheckOptions,
- checkTargetTransparency))
- {
- return FALSE;
- }
- }
-
- //
- // Now walk all the arguments in the signature
- //
-
- for (CorElementType argType = sig.NextArg(); argType != ELEMENT_TYPE_END; argType = sig.NextArg())
- {
- TypeHandle thArg = sig.GetLastTypeHandleThrowing();
-
- MethodTable * pArgMT = thArg.GetMethodTableOfElementType();
-
- // Either a TypeVarTypeDesc or a FnPtrTypeDesc. No access check needed.
- if (pArgMT == NULL)
- continue;
-
- BOOL canAcesssElement = CanAccessClassForExtraChecks(
- pContext,
- pArgMT,
- thArg.GetAssembly(),
- accessCheckOptions,
- checkTargetTransparency);
- if (!canAcesssElement)
- {
- return FALSE;
- }
- }
-
- return TRUE;
-}
-
-//******************************************************************************
-// Can the type be accessed?
-//
-// "ForExtraChecks" means that we only do extra checks (security and transparency)
-// instead of the usual loader visibility checks. Post V2, we can enable all checks.
-
-BOOL ClassLoader::CanAccessClassForExtraChecks( // True if access is legal, false otherwise.
- AccessCheckContext* pContext,
- MethodTable* pTargetClass, // The desired target class.
- Assembly* pTargetAssembly, // Assembly containing that class.
- const AccessCheckOptions & accessCheckOptions,
- BOOL checkTargetTypeTransparency)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- PRECONDITION(CheckPointer(pContext));
- }
- CONTRACTL_END;
-
- // ------------- Old comments begins ------------
- // Critical callers do not need the extra checks
- // TODO: can we enable full access checks now?
- // ------------- Old comments ends ------------
-
- // We shouldn't bypass accessibility check on member signature for FT/Critical callers
-
- return CanAccessClass(pContext,
- pTargetClass,
- pTargetAssembly,
- accessCheckOptions,
- checkTargetTypeTransparency);
-}
-
-//******************************************************************************
// This is the helper function for the corresponding CanAccess()
// It does the following checks:
// 1. CanAccessClass on pTargetMT
@@ -6103,9 +5704,7 @@ BOOL ClassLoader::CheckAccessMember( // TRUE if access is allowed
MethodDesc* pOptionalTargetMethod, // The target method; NULL if the target is a not a method or
// there is no need to check the method's instantiation.
FieldDesc* pOptionalTargetField, // target field, NULL if there is no Target field
- const AccessCheckOptions & accessCheckOptions,
- BOOL checkTargetMethodTransparency,
- BOOL checkTargetTypeTransparency
+ const AccessCheckOptions & accessCheckOptions
)
{
CONTRACTL
@@ -6124,17 +5723,13 @@ BOOL ClassLoader::CheckAccessMember( // TRUE if access is allowed
if (!CanAccessClass(pContext,
pTargetMT,
pTargetAssembly,
- accessCheckOptions,
- checkTargetTypeTransparency))
+ accessCheckOptions))
{
return FALSE;
}
// If we are trying to access a generic method, we have to ensure its instantiation is accessible.
-    // Note that we need to perform transparency checks on the instantiation even if we have
- // checkTargetMethodTransparency set to false, since generic type parameters by design do not effect
- // the transparency of the generic method that is closing over them. This means standard transparency
- // checks between caller and closed callee may succeed even if the callee's closure includes a critical type.
if (!CanAccessMethodInstantiation(
pContext,
pOptionalTargetMethod,
@@ -6150,23 +5745,6 @@ BOOL ClassLoader::CheckAccessMember( // TRUE if access is allowed
-    // We don't need to do transparency check against pTargetMT here because
-    // it was already done in CanAccessClass above.
- if (accessCheckOptions.TransparencyCheckNeeded() &&
- ((checkTargetMethodTransparency && pOptionalTargetMethod) ||
- pOptionalTargetField))
- {
- if (!CheckTransparentAccessToCriticalCode(
- pContext,
- dwMemberAccess,
- pTargetMT,
- pOptionalTargetMethod,
- pOptionalTargetField,
- NULL,
- accessCheckOptions))
- {
- return FALSE;
- }
- }
-
if (IsMdPublic(dwMemberAccess))
{
return TRUE;
diff --git a/src/vm/clsload.hpp b/src/vm/clsload.hpp
index 656f260e01..5a9248e422 100644
--- a/src/vm/clsload.hpp
+++ b/src/vm/clsload.hpp
@@ -317,7 +317,6 @@ public:
virtual MethodTable* GetCallerMT() = 0; // The class that wants access; NULL if interop caller.
virtual Assembly* GetCallerAssembly() = 0; // Assembly containing that class.
virtual bool IsCalledFromInterop() = 0;
- virtual bool IsCallerCritical() = 0; // Can we do a quick check for caller's transparency status?
};
class StaticAccessCheckContext : public AccessCheckContext
@@ -367,8 +366,6 @@ public:
return false;
}
- virtual bool IsCallerCritical();
-
private:
MethodDesc* m_pCallerMethod;
MethodTable* m_pCallerMT;
@@ -429,8 +426,7 @@ public:
AccessCheckOptions(
const AccessCheckOptions & templateAccessCheckOptions,
- BOOL throwIfTargetIsInaccessible,
- BOOL skipCheckForCriticalCode = FALSE);
+ BOOL throwIfTargetIsInaccessible);
// Follow standard rules for doing accessability
BOOL DoNormalAccessibilityChecks() const
@@ -471,8 +467,7 @@ private:
BOOL throwIfTargetIsInaccessible,
MethodTable * pTargetMT,
MethodDesc * pTargetMD,
- FieldDesc * pTargetFD,
- BOOL skipCheckForCriticalCode = FALSE);
+ FieldDesc * pTargetFD);
BOOL DemandMemberAccess(AccessCheckContext *pContext, MethodTable * pTargetMT, BOOL visibilityCheck) const;
@@ -493,27 +488,22 @@ private:
DynamicResolver * m_pAccessContext;
// If the target is not accessible, should the API return FALSE, or should it throw an exception?
BOOL m_fThrowIfTargetIsInaccessible;
- // flag to enable legacy behavior in ClassLoader::CanAccessMemberForExtraChecks.
- BOOL m_fSkipCheckForCriticalCode;
};
void DECLSPEC_NORETURN ThrowFieldAccessException(MethodDesc *pCallerMD,
FieldDesc *pFD,
- BOOL isTransparencyError,
UINT messageID = 0,
Exception *pInnerException = NULL,
BOOL fAccessingFrameworkCode = FALSE);
void DECLSPEC_NORETURN ThrowMethodAccessException(MethodDesc *pCallerMD,
MethodDesc *pCalleeMD,
- BOOL isTransparencyError,
UINT messageID = 0,
Exception *pInnerException = NULL,
BOOL fAccessingFrameworkCode = FALSE);
void DECLSPEC_NORETURN ThrowTypeAccessException(MethodDesc *pCallerMD,
MethodTable *pMT,
- BOOL isTransparencyError,
UINT messageID = 0,
Exception *pInnerException = NULL,
BOOL fAccessingFrameworkCode = FALSE);
@@ -889,8 +879,7 @@ public:
AccessCheckContext* pContext,
MethodTable* pTargetClass,
Assembly* pTargetAssembly,
- const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks,
- BOOL checkTargetTypeTransparency = TRUE);
+ const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks);
static BOOL CanAccess(
AccessCheckContext* pContext,
@@ -899,16 +888,7 @@ public:
DWORD dwMemberAttrs,
MethodDesc* pOptionalTargetMethod,
FieldDesc* pOptionalTargetField,
- const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks,
- BOOL checkTargetMethodTransparency = TRUE,
- BOOL checkTargetTypeTransparency = TRUE);
-
- static BOOL CanAccessClassForExtraChecks(
- AccessCheckContext* pContext,
- MethodTable* pTargetClass,
- Assembly* pTargetAssembly,
- const AccessCheckOptions & accessCheckOptions,
- BOOL checkTargetTypeTransparency);
+ const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks);
static BOOL CanAccessFamilyVerification(
TypeHandle thCurrentClass,
@@ -921,21 +901,6 @@ private:
MethodDesc* pOptionalTargetMethod,
const AccessCheckOptions & accessCheckOptions);
- static BOOL CanAccessMemberForExtraChecks(
- AccessCheckContext* pContext,
- MethodTable* pTargetExactMT,
- MethodDesc* pOptionalTargetMethod,
- FieldDesc* pOptionalTargetField,
- const AccessCheckOptions & accessCheckOptions,
- BOOL checkTargetMethodTransparency);
-
- static BOOL CanAccessSigForExtraChecks(
- AccessCheckContext* pContext,
- MethodDesc* pTargetMethodSig,
- MethodTable* pTargetExactMT,
- const AccessCheckOptions & accessCheckOptions,
- BOOL checkTargetTransparency);
-
static BOOL CanAccessFamily(
MethodTable* pCurrentClass,
MethodTable* pTargetClass);
@@ -947,9 +912,7 @@ private:
DWORD dwMemberAttrs,
MethodDesc* pOptionalTargetMethod,
FieldDesc* pOptionalTargetField,
- const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks,
- BOOL checkTargetMethodTransparency = TRUE,
- BOOL checkTargetTypeTransparency = TRUE);
+ const AccessCheckOptions & accessCheckOptions = *AccessCheckOptions::s_pNormalAccessChecks);
public:
diff --git a/src/vm/clsload.inl b/src/vm/clsload.inl
index 991498ec9c..7dcd1a5d00 100644
--- a/src/vm/clsload.inl
+++ b/src/vm/clsload.inl
@@ -64,8 +64,7 @@ inline void AccessCheckOptions::Initialize(
BOOL throwIfTargetIsInaccessible,
MethodTable * pTargetMT,
MethodDesc * pTargetMethod,
- FieldDesc * pTargetField,
- BOOL skipCheckForCriticalCode /*=FALSE*/)
+ FieldDesc * pTargetField)
{
CONTRACTL
{
@@ -90,7 +89,6 @@ inline void AccessCheckOptions::Initialize(
m_pTargetMT = pTargetMT;
m_pTargetMethod = pTargetMethod;
m_pTargetField = pTargetField;
- m_fSkipCheckForCriticalCode = skipCheckForCriticalCode;
}
//******************************************************************************
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index d934b824f6..7d90ce9a5e 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -38,6 +38,10 @@
#include "../debug/daccess/fntableaccess.h"
#endif // _WIN64
+#ifdef FEATURE_PERFMAP
+#include "perfmap.h"
+#endif
+
#define MAX_M_ALLOCATED (16 * 1024)
// Default number of jump stubs in a jump stub block
@@ -2186,7 +2190,7 @@ HeapList* EEJitManager::NewCodeHeap(CodeHeapRequestInfo *pInfo, DomainCodeHeapLi
} CONTRACT_END;
size_t initialRequestSize = pInfo->getRequestSize();
- size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB)
+ size_t minReserveSize = VIRTUAL_ALLOC_RESERVE_GRANULARITY; // ( 64 KB)
#ifdef _WIN64
if (pInfo->m_hiAddr == 0)
@@ -2390,12 +2394,12 @@ void* EEJitManager::allocCodeRaw(CodeHeapRequestInfo *pInfo,
{
// Let us create a new heap.
- DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator);
+ DomainCodeHeapList *pList = GetCodeHeapList(pInfo, pInfo->m_pAllocator);
if (pList == NULL)
{
// not found so need to create the first one
pList = CreateCodeHeapList(pInfo);
- _ASSERTE(pList == GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator));
+ _ASSERTE(pList == GetCodeHeapList(pInfo, pInfo->m_pAllocator));
}
_ASSERTE(pList);
@@ -2478,23 +2482,29 @@ CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, CorJitAll
SIZE_T totalSize = blockSize;
+ CodeHeader * pCodeHdr = NULL;
+
+ CodeHeapRequestInfo requestInfo(pMD);
+#if defined(FEATURE_JIT_PITCHING)
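+    // Large pitchable methods get their own dynamic-domain heap blocks so the
+    // code can be freed individually when it is pitched later.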
+ if (pMD && pMD->IsPitchable() && CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMethodSizeThreshold) < blockSize)
+ {
+ requestInfo.SetDynamicDomain();
+ }
+#endif
+
#if defined(USE_INDIRECT_CODEHEADER)
SIZE_T realHeaderSize = offsetof(RealCodeHeader, unwindInfos[0]) + (sizeof(T_RUNTIME_FUNCTION) * nUnwindInfos);
// if this is a LCG method then we will be allocating the RealCodeHeader
// following the code so that the code block can be removed easily by
// the LCG code heap.
- if (pMD->IsLCGMethod())
+ if (requestInfo.IsDynamicDomain())
{
totalSize = ALIGN_UP(totalSize, sizeof(void*)) + realHeaderSize;
static_assert_no_msg(CODE_SIZE_ALIGN >= sizeof(void*));
}
#endif // USE_INDIRECT_CODEHEADER
- CodeHeader * pCodeHdr = NULL;
-
- CodeHeapRequestInfo requestInfo(pMD);
-
// Scope the lock
{
CrstHolder ch(&m_CodeHeapCritSec);
@@ -2521,7 +2531,7 @@ CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, CorJitAll
pCodeHdr = ((CodeHeader *)pCode) - 1;
#ifdef USE_INDIRECT_CODEHEADER
- if (pMD->IsLCGMethod())
+ if (requestInfo.IsDynamicDomain())
{
pCodeHdr->SetRealCodeHeader((BYTE*)pCode + ALIGN_UP(blockSize, sizeof(void*)));
}
@@ -2550,7 +2560,7 @@ CodeHeader* EEJitManager::allocCode(MethodDesc* pMD, size_t blockSize, CorJitAll
RETURN(pCodeHdr);
}
-EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly)
+EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(CodeHeapRequestInfo *pInfo, LoaderAllocator *pAllocator, BOOL fDynamicOnly)
{
CONTRACTL {
NOTHROW;
@@ -2564,7 +2574,7 @@ EEJitManager::DomainCodeHeapList *EEJitManager::GetCodeHeapList(MethodDesc *pMD,
// get the appropriate list of heaps
// pMD is NULL for NGen modules during Module::LoadTokenTables
- if (fDynamicOnly || (pMD != NULL && pMD->IsLCGMethod()))
+ if (fDynamicOnly || (pInfo != NULL && pInfo->IsDynamicDomain()))
{
ppList = m_DynamicDomainCodeHeaps.Table();
count = m_DynamicDomainCodeHeaps.Count();
@@ -2605,7 +2615,7 @@ HeapList* EEJitManager::GetCodeHeap(CodeHeapRequestInfo *pInfo)
// loop through the m_DomainCodeHeaps to find the AppDomain
// if not found, then create it
- DomainCodeHeapList *pList = GetCodeHeapList(pInfo->m_pMD, pInfo->m_pAllocator);
+ DomainCodeHeapList *pList = GetCodeHeapList(pInfo, pInfo->m_pAllocator);
if (pList)
{
// Set pResult to the largest non-full HeapList
@@ -2726,7 +2736,7 @@ bool EEJitManager::CanUseCodeHeap(CodeHeapRequestInfo *pInfo, HeapList *pCodeHea
}
}
- return retVal;
+ return retVal;
}
EEJitManager::DomainCodeHeapList * EEJitManager::CreateCodeHeapList(CodeHeapRequestInfo *pInfo)
@@ -5048,6 +5058,10 @@ DONE:
emitBackToBackJump(jumpStub, (void*) target);
+#ifdef FEATURE_PERFMAP
+ PerfMap::LogStubs(__FUNCTION__, "emitBackToBackJump", (PCODE)jumpStub, BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
+#endif
+
// We always add the new jumpstub to the jumpStubCache
//
_ASSERTE(pJumpStubCache != NULL);
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
index f85eeb59db..afef682e2a 100644
--- a/src/vm/codeman.h
+++ b/src/vm/codeman.h
@@ -369,6 +369,8 @@ struct CodeHeapRequestInfo
bool m_isCollectible;
bool IsDynamicDomain() { return m_isDynamicDomain; }
+ void SetDynamicDomain() { m_isDynamicDomain = true; }
+
bool IsCollectible() { return m_isCollectible; }
size_t getRequestSize() { return m_requestSize; }
@@ -1095,7 +1097,7 @@ private :
size_t header, size_t blockSize, unsigned align,
HeapList ** ppCodeHeap /* Writeback, Can be null */ );
- DomainCodeHeapList *GetCodeHeapList(MethodDesc *pMD, LoaderAllocator *pAllocator, BOOL fDynamicOnly = FALSE);
+ DomainCodeHeapList *GetCodeHeapList(CodeHeapRequestInfo *pInfo, LoaderAllocator *pAllocator, BOOL fDynamicOnly = FALSE);
DomainCodeHeapList *CreateCodeHeapList(CodeHeapRequestInfo *pInfo);
LoaderHeap* GetJitMetaHeap(MethodDesc *pMD);
#endif // !CROSSGEN_COMPILE
diff --git a/src/vm/codepitchingmanager.cpp b/src/vm/codepitchingmanager.cpp
new file mode 100644
index 0000000000..521c101b0f
--- /dev/null
+++ b/src/vm/codepitchingmanager.cpp
@@ -0,0 +1,522 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: CodePitchingManager.cpp
+//
+
+// ===========================================================================
+// This file contains the implementation of code pitching.
+// Its distinctive features and algorithm are:
+//
+// 1. All of its code is under #if defined(FEATURE_JIT_PITCHING) and does not interfere with other code.
+// 2. The feature is active only if INTERNAL_JitPitchEnabled != 0 and INTERNAL_JitPitchMemThreshold > 0.
+// 3. Jitted code can be pitched only for methods that are not Dynamic, FCall or Virtual.
+// 4. If the size of the generated native code exceeds INTERNAL_JitPitchMethodSizeThreshold, the code is
+//    placed in a special code heap list. Each heap block in this list stores the code for only one method
+//    and is sized to hold that method's code rounded up to 4K. Pointers to such methods are stored in the
+//    "PitchingCandidateMethods" hash map.
+// 5. If the entrypoint of a method is backpatched, the method is removed from the "PitchingCandidateMethods"
+//    hash map and stored in the "NotForPitchingMethods" hash map.
+// 6. When the total size of the generated native code exceeds INTERNAL_JitPitchMemThreshold, execution of
+//    the program is stopped, the stack frames of all threads are inspected, and pointers to the methods
+//    currently being executed are stored in the "ExecutedMethods" hash map.
+// 7. The code for all methods in "PitchingCandidateMethods" that are not in "ExecutedMethods" is pitched:
+//    the heap blocks for these methods are reset to their initial state so they can be reused for newly
+//    compiled methods, and the code pointers of the non-executed methods are set to nullptr.
+// 8. Once a method's code has been pitched, the method is stored in the "NotForPitchingMethods" hash map.
+//    Thus, if the method is compiled a second time, it is considered repeatedly called, pitching it again
+//    would be wasteful, and the newly compiled code is stored in the usual heap.
+// 9. The coreclr code with this feature is built with the option
+//    ./build.sh cmakeargs -DFEATURE_JIT_PITCHING=true
+// ===========================================================================
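+//
+// Illustrative only: assuming the usual COMPlus_ environment-variable mapping
+// for CLRConfig INTERNAL_* values, a run with pitching enabled might look like
+//
+//   export COMPlus_JitPitchEnabled=1               # master switch (point 2)
+//   export COMPlus_JitPitchMemThreshold=1000000    # pitch once ~1 MB of jitted code is live
+//   export COMPlus_JitPitchMethodSizeThreshold=512 # only methods larger than 512 bytes (point 4)
+//   ./corerun app.dll
+//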
+
+#include "common.h"
+
+#ifndef DACCESS_COMPILE
+
+#if defined(FEATURE_JIT_PITCHING)
+
+#include "nibblemapmacros.h"
+#include "threadsuspend.h"
+
+static PtrHashMap* s_pPitchingCandidateMethods = nullptr;
+static PtrHashMap* s_pPitchingCandidateSizes = nullptr;
+static SimpleRWLock* s_pPitchingCandidateMethodsLock = nullptr;
+
+static PtrHashMap* s_pExecutedMethods = nullptr;
+static SimpleRWLock* s_pExecutedMethodsLock = nullptr;
+
+static PtrHashMap* s_pNotForPitchingMethods = nullptr;
+static SimpleRWLock* s_pNotForPitchingMethodsLock = nullptr;
+
+#ifdef _DEBUG
+static PtrHashMap* s_pPitchedMethods = nullptr;
+static SimpleRWLock* s_pPitchedMethodsLock = nullptr;
+#endif
+
+static ULONG s_totalNCSize = 0;
+static SimpleRWLock* s_totalNCSizeLock = nullptr;
+
+static ULONG s_jitPitchedBytes = 0;
+
+static INT64 s_JitPitchLastTick = 0;
+
+static bool s_JitPitchInitialized = false;
+
+
+static BOOL IsOwnerOfRWLock(LPVOID lock)
+{
+ // @TODO - SimpleRWLock does not have knowledge of which thread gets the writer
+ // lock, so no way to verify
+ return TRUE;
+}
+
+static void CreateRWLock(SimpleRWLock** lock)
+{
+ if (*lock == nullptr)
+ {
+ void *pLockSpace = SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(SimpleRWLock)));
+ SimpleRWLock *pLock = new (pLockSpace) SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT);
+
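+        // Publish the new lock with a compare-exchange; if another thread won
+        // the race, back out this allocation and use the winner's lock instead.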
+ if (FastInterlockCompareExchangePointer(lock, pLock, NULL) != NULL)
+ SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()->BackoutMem(pLockSpace, sizeof(SimpleRWLock));
+ }
+}
+
+static PtrHashMap* CreateHashMap(SimpleRWLock* rwLock)
+{
+ PtrHashMap *pMap = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap();
+ LockOwner lock = {rwLock, IsOwnerOfRWLock};
+ pMap->Init(32, nullptr, FALSE, &lock);
+ return pMap;
+}
+
+static void InitializeJitPitching()
+{
+ if (!s_JitPitchInitialized)
+ {
+ CreateRWLock(&s_pNotForPitchingMethodsLock);
+ CreateRWLock(&s_pPitchingCandidateMethodsLock);
+ CreateRWLock(&s_totalNCSizeLock);
+
+ {
+ SimpleReadLockHolder srlh(s_pNotForPitchingMethodsLock);
+ if (s_pNotForPitchingMethods == nullptr)
+ {
+ s_pNotForPitchingMethods = CreateHashMap(s_pNotForPitchingMethodsLock);
+ }
+ }
+
+ {
+ SimpleReadLockHolder srlh(s_pPitchingCandidateMethodsLock);
+ if (s_pPitchingCandidateMethods == nullptr)
+ {
+ s_pPitchingCandidateMethods = CreateHashMap(s_pPitchingCandidateMethodsLock);
+ s_pPitchingCandidateSizes = CreateHashMap(s_pPitchingCandidateMethodsLock);
+ }
+ }
+
+ s_JitPitchInitialized = true;
+ }
+}
+
+static COUNT_T GetFullHash(MethodDesc* pMD)
+{
+ const char *moduleName = pMD->GetModule()->GetSimpleName();
+
+ COUNT_T hash = HashStringA(moduleName); // Start the hash with the Module name
+
+ SString className, methodName, methodSig;
+
+ pMD->GetMethodInfo(className, methodName, methodSig);
+
+ hash = HashCOUNT_T(hash, className.Hash()); // Hash in the name of the Class name
+ hash = HashCOUNT_T(hash, methodName.Hash()); // Hash in the name of the Method name
+ hash = HashCOUNT_T(hash, 0xffffffff & (ULONGLONG)pMD);
+
+ return hash;
+}
+
+bool MethodDesc::IsPitchable()
+{
+ if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchEnabled) == 0) ||
+ (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMemThreshold) == 0))
+ return FALSE;
+
+ InitializeJitPitching();
+
+ if (IsLCGMethod() || IsVtableMethod() || IsInterface() || IsVirtual())
+ return FALSE;
+
+ _ASSERTE(s_pNotForPitchingMethodsLock != nullptr && s_pNotForPitchingMethods != nullptr);
+
+ {
+ SimpleReadLockHolder srlh(s_pNotForPitchingMethodsLock);
+ UPTR key = (UPTR)GetFullHash(this);
+ MethodDesc *pFound = (MethodDesc *)s_pNotForPitchingMethods->LookupValue(key, (LPVOID)this);
+ if (pFound != (MethodDesc *)INVALIDENTRY)
+ {
+ return FALSE;
+ }
+ }
+ return TRUE;
+}
+
+EXTERN_C bool LookupOrCreateInNotForPitching(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ if (pMD != nullptr && pMD->IsPitchable())
+ {
+ UPTR key = (UPTR)GetFullHash(pMD);
+
+ _ASSERTE(s_pNotForPitchingMethodsLock != nullptr && s_pNotForPitchingMethods != nullptr);
+
+ {
+ SimpleReadLockHolder srlh(s_pNotForPitchingMethodsLock);
+ MethodDesc *pFound = (MethodDesc *)s_pNotForPitchingMethods->LookupValue(key, (LPVOID)pMD);
+ if (pFound != (MethodDesc *)INVALIDENTRY)
+ return TRUE;
+ }
+
+ {
+ SimpleWriteLockHolder swlh(s_pNotForPitchingMethodsLock);
+ s_pNotForPitchingMethods->InsertValue(key, (LPVOID)pMD);
+ }
+ }
+ return FALSE;
+}
+
+static void LookupOrCreateInPitchingCandidate(MethodDesc* pMD, ULONG sizeOfCode)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ if (pMD == nullptr || !pMD->IsPitchable())
+ return;
+
+ PCODE prCode = pMD->GetPreImplementedCode();
+ if (prCode)
+ return;
+
+ if (!pMD->HasPrecode())
+ return;
+
+ UPTR key = (UPTR)GetFullHash(pMD);
+
+ _ASSERTE(s_pPitchingCandidateMethodsLock != nullptr && s_pPitchingCandidateMethods != nullptr);
+ _ASSERTE(s_pPitchingCandidateSizes);
+
+ {
+ // Try getting an existing value first.
+ SimpleReadLockHolder srlh(s_pPitchingCandidateMethodsLock);
+ MethodDesc *pFound = (MethodDesc *)s_pPitchingCandidateMethods->LookupValue(key, (LPVOID)pMD);
+ if (pFound != (MethodDesc *)INVALIDENTRY)
+ return;
+ }
+
+ {
+ SimpleWriteLockHolder swlh(s_pPitchingCandidateMethodsLock);
+ s_pPitchingCandidateMethods->InsertValue(key, (LPVOID)pMD);
+ s_pPitchingCandidateSizes->InsertValue(key, (LPVOID)((ULONGLONG)(sizeOfCode << 1)));
+#ifdef _DEBUG
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchPrintStat) != 0)
+ {
+ SString className, methodName, methodSig;
+ pMD->GetMethodInfo(className, methodName, methodSig);
+
+ StackScratchBuffer scratch;
+ const char* szClassName = className.GetUTF8(scratch);
+ const char* szMethodSig = methodSig.GetUTF8(scratch);
+
+ printf("Candidate %lld %s :: %s %s\n",
+ sizeOfCode, szClassName, pMD->GetName(), szMethodSig);
+ }
+#endif
+ }
+}
+
+EXTERN_C void DeleteFromPitchingCandidate(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ MODE_COOPERATIVE;
+ GC_TRIGGERS;
+ THROWS;
+ }
+ CONTRACTL_END;
+
+ if (pMD != nullptr && pMD->IsPitchable())
+ {
+ PCODE pCode = pMD->GetPreImplementedCode();
+
+ if (pCode)
+ return;
+
+ _ASSERTE(s_pPitchingCandidateMethodsLock != nullptr && s_pPitchingCandidateMethods != nullptr);
+ _ASSERTE(s_pPitchingCandidateSizes != nullptr);
+
+ UPTR key = (UPTR)GetFullHash(pMD);
+ {
+ SimpleReadLockHolder srlh(s_pPitchingCandidateMethodsLock);
+ MethodDesc *pFound = (MethodDesc *)s_pPitchingCandidateMethods->LookupValue(key, (LPVOID)pMD);
+ if (pFound == (MethodDesc *)INVALIDENTRY)
+ return;
+ }
+
+ {
+ SimpleWriteLockHolder swlh(s_pPitchingCandidateMethodsLock);
+ s_pPitchingCandidateMethods->DeleteValue(key, (LPVOID)pMD);
+ }
+
+ LPVOID pitchedBytes;
+ {
+ SimpleReadLockHolder srlh(s_pPitchingCandidateMethodsLock);
+ pitchedBytes = s_pPitchingCandidateSizes->LookupValue(key, nullptr);
+ _ASSERTE(pitchedBytes != (LPVOID)INVALIDENTRY);
+ }
+ {
+ SimpleWriteLockHolder swlh(s_pPitchingCandidateMethodsLock);
+ s_pPitchingCandidateSizes->DeleteValue(key, pitchedBytes);
+ }
+ }
+}
+
+EXTERN_C void MarkMethodNotPitchingCandidate(MethodDesc* pMD)
+{
+ DeleteFromPitchingCandidate(pMD);
+ (void)LookupOrCreateInNotForPitching(pMD);
+}
+
+StackWalkAction CrawlFrameVisitor(CrawlFrame* pCf, Thread* pMdThread)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc* pMD = pCf->GetFunction();
+
+ // Filter out methods we don't care about
+ if (pMD == nullptr || !pMD->IsPitchable())
+ {
+ return SWA_CONTINUE;
+ }
+
+ if (s_pExecutedMethods == nullptr)
+ {
+ PtrHashMap *pMap = new (SystemDomain::GetGlobalLoaderAllocator()->GetLowFrequencyHeap()) PtrHashMap();
+ pMap->Init(TRUE, nullptr);
+ s_pExecutedMethods = pMap;
+ }
+
+ UPTR key = (UPTR)GetFullHash(pMD);
+ MethodDesc *pFound = (MethodDesc *)s_pExecutedMethods->LookupValue(key, (LPVOID)pMD);
+ if (pFound == (MethodDesc *)INVALIDENTRY)
+ {
+ s_pExecutedMethods->InsertValue(key, (LPVOID)pMD);
+ }
+
+ return SWA_CONTINUE;
+}
+
+// Visitor for stack walk callback.
+StackWalkAction StackWalkCallback(CrawlFrame* pCf, VOID* data)
+{
+ WRAPPER_NO_CONTRACT;
+
+ // WalkInfo* info = (WalkInfo*) data;
+ return CrawlFrameVisitor(pCf, (Thread *)data);
+}
+
+static ULONGLONG s_PitchedMethodCounter = 0;
+void MethodDesc::PitchNativeCode()
+{
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+ g_IBCLogger.LogMethodDescAccess(this);
+
+ if (!IsPitchable())
+ return;
+
+ PCODE pCode = GetNativeCode();
+
+ if (!pCode)
+ return;
+
+ _ASSERTE(HasPrecode());
+
+ _ASSERTE(HasNativeCode());
+
+ ++s_PitchedMethodCounter;
+
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMinVal) > s_PitchedMethodCounter)
+ {
+ return;
+ }
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMaxVal) < s_PitchedMethodCounter)
+ {
+ return;
+ }
+
+ if (LookupOrCreateInNotForPitching(this))
+ return;
+
+ MethodTable * pMT = GetMethodTable();
+ _ASSERTE(pMT != nullptr);
+
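+    // Clear the low bit (set in ARM/Thumb entry points) and step back to the
+    // CodeHeader that immediately precedes the jitted code.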
+ CodeHeader* pCH = ((CodeHeader*)(pCode & ~1)) - 1;
+ _ASSERTE(pCH->GetMethodDesc() == this);
+
+ HostCodeHeap* pHeap = HostCodeHeap::GetCodeHeap((TADDR)pCode);
+ pHeap->GetJitManager()->FreeCodeMemory(pHeap, (void*)pCode);
+
+ ClearFlagsOnUpdate();
+
+ _ASSERTE(HasPrecode());
+ GetPrecode()->Reset();
+
+ if (HasNativeCodeSlot())
+ {
+ RelativePointer<TADDR> *pRelPtr = (RelativePointer<TADDR> *)GetAddrOfNativeCodeSlot();
+ pRelPtr->SetValueMaybeNull(NULL);
+ }
+ else
+ {
+#ifdef FEATURE_INTERPRETER
+ SetNativeCodeInterlocked(NULL, NULL, FALSE);
+#else
+ SetNativeCodeInterlocked(NULL, NULL);
+#endif
+ }
+
+ _ASSERTE(!HasNativeCode());
+
+ UPTR key = (UPTR)GetFullHash(this);
+ ULONGLONG pitchedBytes;
+ {
+ SimpleReadLockHolder srlh(s_pPitchingCandidateMethodsLock);
+ pitchedBytes = (ULONGLONG)s_pPitchingCandidateSizes->LookupValue(key, nullptr);
+ _ASSERTE(pitchedBytes != (ULONGLONG)INVALIDENTRY);
+ if (pitchedBytes == (ULONGLONG)INVALIDENTRY)
+ pitchedBytes = 0;
+ s_jitPitchedBytes += (pitchedBytes >> 1);
+ }
+ {
+ SimpleWriteLockHolder swlh(s_pPitchingCandidateMethodsLock);
+ s_pPitchingCandidateMethods->DeleteValue(key, (LPVOID)this);
+ if (pitchedBytes != 0)
+ s_pPitchingCandidateSizes->DeleteValue(key, (LPVOID)pitchedBytes);
+ }
+
+    if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchPrintStat) != 0)
+    {
+        SString className, methodName, methodSig;
+        GetMethodInfo(className, methodName, methodSig);
+
+        StackScratchBuffer scratch;
+        const char* szClassName = className.GetUTF8(scratch);
+        const char* szMethodSig = methodSig.GetUTF8(scratch);
+
+        printf("Pitched %llu %llu %s :: %s %s\n",
+               s_PitchedMethodCounter, pitchedBytes, szClassName, GetName(), szMethodSig);
+    }
+
+ DACNotify::DoJITPitchingNotification(this);
+}
+
+EXTERN_C void CheckStacksAndPitch()
+{
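+    // Only pitch when the feature is enabled and either no time interval is
+    // configured or enough time has passed since the last pitch.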
+ if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchEnabled) != 0) &&
+ (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMemThreshold) != 0) &&
+ (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchTimeInterval) == 0 ||
+ ((::GetTickCount64() - s_JitPitchLastTick) > CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchTimeInterval))))
+ {
+ SimpleReadLockHolder srlh(s_totalNCSizeLock);
+
+ if ((s_totalNCSize - s_jitPitchedBytes) > CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMemThreshold) &&
+ s_pPitchingCandidateMethods != nullptr)
+ {
+ EX_TRY
+ {
+ // Suspend the runtime.
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_OTHER);
+
+ // Walk all other threads.
+ Thread* pThread = nullptr;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != nullptr)
+ {
+ pThread->StackWalkFrames(StackWalkCallback, (VOID *)pThread, ALLOW_ASYNC_STACK_WALK);
+ }
+
+ if (s_pExecutedMethods)
+ {
+ PtrHashMap::PtrIterator i = s_pPitchingCandidateMethods->begin();
+ while (!i.end())
+ {
+ MethodDesc *pMD = (MethodDesc *) i.GetValue();
+ UPTR key = (UPTR)GetFullHash(pMD);
+ MethodDesc *pFound = (MethodDesc *)s_pExecutedMethods->LookupValue(key, (LPVOID)pMD);
+ ++i;
+ if (pFound == (MethodDesc *)INVALIDENTRY)
+ {
+ pMD->PitchNativeCode();
+ }
+ }
+ s_pExecutedMethods->Clear();
+ delete s_pExecutedMethods;
+ s_pExecutedMethods = nullptr;
+ s_pPitchingCandidateMethods->Compact();
+ s_pPitchingCandidateSizes->Compact();
+ }
+
+ s_JitPitchLastTick = ::GetTickCount64();
+
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions);
+ }
+ }
+}
+
+EXTERN_C void SavePitchingCandidate(MethodDesc* pMD, ULONG sizeOfCode)
+{
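+    // Record a freshly jitted method as a pitching candidate (point 4 above) and
+    // keep the running total of jitted code size that CheckStacksAndPitch consults.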
+ if (pMD && pMD->IsPitchable() && CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMethodSizeThreshold) < sizeOfCode)
+ {
+ LookupOrCreateInPitchingCandidate(pMD, sizeOfCode);
+ }
+ if (sizeOfCode > 0)
+ {
+ SimpleWriteLockHolder swlh(s_totalNCSizeLock);
+ s_totalNCSize += sizeOfCode;
+ if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchPrintStat) != 0)
+ printf("jitted %lld (bytes) pitched %lld (bytes)\n", s_totalNCSize, s_jitPitchedBytes);
+ }
+}
+#endif
+
+#endif
diff --git a/src/vm/codeversion.cpp b/src/vm/codeversion.cpp
new file mode 100644
index 0000000000..10d3013f35
--- /dev/null
+++ b/src/vm/codeversion.cpp
@@ -0,0 +1,2862 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: CodeVersion.cpp
+//
+// ===========================================================================
+
+#include "common.h"
+#include "codeversion.h"
+
+#ifdef FEATURE_CODE_VERSIONING
+#include "threadsuspend.h"
+#include "methoditer.h"
+#include "../debug/ee/debugger.h"
+#include "../debug/ee/walker.h"
+#include "../debug/ee/controller.h"
+#endif // FEATURE_CODE_VERSIONING
+
+#ifndef FEATURE_CODE_VERSIONING
+
+//
+// When not using code versioning we've got a minimal implementation of
+// NativeCodeVersion that simply wraps a MethodDesc* with no additional
+// versioning information
+//
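+// For example (illustrative only), callers can use the two builds uniformly:
+//
+//   NativeCodeVersion ncv(pMD);        // wraps the MethodDesc directly
+//   PCODE code = ncv.GetNativeCode();  // same as pMD->GetNativeCode()
+//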
+
+NativeCodeVersion::NativeCodeVersion(const NativeCodeVersion & rhs) : m_pMethod(rhs.m_pMethod) {}
+NativeCodeVersion::NativeCodeVersion(PTR_MethodDesc pMethod) : m_pMethod(pMethod) {}
+BOOL NativeCodeVersion::IsNull() const { return m_pMethod == NULL; }
+PTR_MethodDesc NativeCodeVersion::GetMethodDesc() const { return m_pMethod; }
+PCODE NativeCodeVersion::GetNativeCode() const { return m_pMethod->GetNativeCode(); }
+NativeCodeVersionId NativeCodeVersion::GetVersionId() const { return 0; }
+ReJITID NativeCodeVersion::GetILCodeVersionId() const { return 0; }
+ILCodeVersion NativeCodeVersion::GetILCodeVersion() const { return ILCodeVersion(m_pMethod); }
+#ifndef DACCESS_COMPILE
+BOOL NativeCodeVersion::SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected) { return m_pMethod->SetNativeCodeInterlocked(pCode, pExpected); }
+#endif
+bool NativeCodeVersion::operator==(const NativeCodeVersion & rhs) const { return m_pMethod == rhs.m_pMethod; }
+bool NativeCodeVersion::operator!=(const NativeCodeVersion & rhs) const { return !operator==(rhs); }
+
+
+#else // FEATURE_CODE_VERSIONING
+
+
+// This HRESULT is only used as a private implementation detail. If it escapes through public APIS
+// it is a bug. Corerror.xml has a comment in it reserving this value for our use but it doesn't
+// appear in the public headers.
+
+#define CORPROF_E_RUNTIME_SUSPEND_REQUIRED 0x80131381
+
+#ifndef DACCESS_COMPILE
+NativeCodeVersionNode::NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethodDesc, ReJITID parentId) :
+ m_pNativeCode(NULL),
+ m_pMethodDesc(pMethodDesc),
+ m_parentId(parentId),
+ m_pNextMethodDescSibling(NULL),
+ m_id(id),
+ m_optTier(NativeCodeVersion::OptimizationTier0),
+ m_flags(0)
+{}
+#endif
+
+#ifdef DEBUG
+BOOL NativeCodeVersionNode::LockOwnedByCurrentThread() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread();
+}
+#endif //DEBUG
+
+PTR_MethodDesc NativeCodeVersionNode::GetMethodDesc() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMethodDesc;
+}
+
+PCODE NativeCodeVersionNode::GetNativeCode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pNativeCode;
+}
+
+ReJITID NativeCodeVersionNode::GetILVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_parentId;
+}
+
+ILCodeVersion NativeCodeVersionNode::GetILCodeVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef DEBUG
+ if (GetILVersionId() != 0)
+ {
+ _ASSERTE(LockOwnedByCurrentThread());
+ }
+#endif
+ PTR_MethodDesc pMD = GetMethodDesc();
+ return pMD->GetCodeVersionManager()->GetILCodeVersion(pMD, GetILVersionId());
+}
+
+NativeCodeVersionId NativeCodeVersionNode::GetVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_id;
+}
+
+#ifndef DACCESS_COMPILE
+BOOL NativeCodeVersionNode::SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected)
+{
+ LIMITED_METHOD_CONTRACT;
+ return FastInterlockCompareExchangePointer(&m_pNativeCode,
+ (TADDR&)pCode, (TADDR&)pExpected) == (TADDR&)pExpected;
+}
+#endif
+
+BOOL NativeCodeVersionNode::IsActiveChildVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ return (m_flags & IsActiveChildFlag) != 0;
+}
+
+#ifndef DACCESS_COMPILE
+void NativeCodeVersionNode::SetActiveChildFlag(BOOL isActive)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ if (isActive)
+ {
+ m_flags |= IsActiveChildFlag;
+ }
+ else
+ {
+ m_flags &= ~IsActiveChildFlag;
+ }
+}
+#endif
+
+
+#ifdef FEATURE_TIERED_COMPILATION
+NativeCodeVersion::OptimizationTier NativeCodeVersionNode::GetOptimizationTier() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_optTier.Load();
+}
+#ifndef DACCESS_COMPILE
+void NativeCodeVersionNode::SetOptimizationTier(NativeCodeVersion::OptimizationTier tier)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_optTier.Store(tier);
+}
+#endif
+#endif // FEATURE_TIERED_COMPILATION
+
+NativeCodeVersion::NativeCodeVersion() :
+ m_storageKind(StorageKind::Unknown)
+{}
+
+NativeCodeVersion::NativeCodeVersion(const NativeCodeVersion & rhs) :
+ m_storageKind(rhs.m_storageKind)
+{
+ if(m_storageKind == StorageKind::Explicit)
+ {
+ m_pVersionNode = rhs.m_pVersionNode;
+ }
+ else if(m_storageKind == StorageKind::Synthetic)
+ {
+ m_synthetic = rhs.m_synthetic;
+ }
+}
+
+NativeCodeVersion::NativeCodeVersion(PTR_NativeCodeVersionNode pVersionNode) :
+ m_storageKind(pVersionNode != NULL ? StorageKind::Explicit : StorageKind::Unknown),
+ m_pVersionNode(pVersionNode)
+{}
+
+NativeCodeVersion::NativeCodeVersion(PTR_MethodDesc pMethod) :
+ m_storageKind(pMethod != NULL ? StorageKind::Synthetic : StorageKind::Unknown)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_synthetic.m_pMethodDesc = pMethod;
+}
+
+BOOL NativeCodeVersion::IsNull() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_storageKind == StorageKind::Unknown;
+}
+
+BOOL NativeCodeVersion::IsDefaultVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_storageKind == StorageKind::Synthetic;
+}
+
+PTR_MethodDesc NativeCodeVersion::GetMethodDesc() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetMethodDesc();
+ }
+ else
+ {
+ return m_synthetic.m_pMethodDesc;
+ }
+}
+
+PCODE NativeCodeVersion::GetNativeCode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetNativeCode();
+ }
+ else
+ {
+ return GetMethodDesc()->GetNativeCode();
+ }
+}
+
+ReJITID NativeCodeVersion::GetILCodeVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetILVersionId();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+ILCodeVersion NativeCodeVersion::GetILCodeVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetILCodeVersion();
+ }
+ else
+ {
+ PTR_MethodDesc pMethod = GetMethodDesc();
+ return ILCodeVersion(dac_cast<PTR_Module>(pMethod->GetModule()), pMethod->GetMemberDef());
+ }
+}
+
+NativeCodeVersionId NativeCodeVersion::GetVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetVersionId();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+BOOL NativeCodeVersion::SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->SetNativeCodeInterlocked(pCode, pExpected);
+ }
+ else
+ {
+ return GetMethodDesc()->SetNativeCodeInterlocked(pCode, pExpected);
+ }
+}
+#endif
+
+BOOL NativeCodeVersion::IsActiveChildVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->IsActiveChildVersion();
+ }
+ else
+ {
+ MethodDescVersioningState* pMethodVersioningState = GetMethodDescVersioningState();
+ if (pMethodVersioningState == NULL)
+ {
+ return TRUE;
+ }
+ return pMethodVersioningState->IsDefaultVersionActiveChild();
+ }
+}
+
+PTR_MethodDescVersioningState NativeCodeVersion::GetMethodDescVersioningState() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PTR_MethodDesc pMethodDesc = GetMethodDesc();
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ return pCodeVersionManager->GetMethodDescVersioningState(pMethodDesc);
+}
+
+#ifndef DACCESS_COMPILE
+void NativeCodeVersion::SetActiveChildFlag(BOOL isActive)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ AsNode()->SetActiveChildFlag(isActive);
+ }
+ else
+ {
+ MethodDescVersioningState* pMethodVersioningState = GetMethodDescVersioningState();
+ pMethodVersioningState->SetDefaultVersionActiveChildFlag(isActive);
+ }
+}
+
+MethodDescVersioningState* NativeCodeVersion::GetMethodDescVersioningState()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ MethodDesc* pMethodDesc = GetMethodDesc();
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ return pCodeVersionManager->GetMethodDescVersioningState(pMethodDesc);
+}
+#endif
+
+#ifdef FEATURE_TIERED_COMPILATION
+NativeCodeVersion::OptimizationTier NativeCodeVersion::GetOptimizationTier() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetOptimizationTier();
+ }
+ else
+ {
+ return NativeCodeVersion::OptimizationTier0;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+void NativeCodeVersion::SetOptimizationTier(NativeCodeVersion::OptimizationTier tier)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ AsNode()->SetOptimizationTier(tier);
+ }
+ else
+ {
+ _ASSERTE(!"Do not call SetOptimizationTier on default code versions - these versions are immutable");
+ }
+}
+#endif
+#endif
+
+PTR_NativeCodeVersionNode NativeCodeVersion::AsNode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return m_pVersionNode;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+PTR_NativeCodeVersionNode NativeCodeVersion::AsNode()
+{
+ LIMITED_METHOD_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return m_pVersionNode;
+ }
+ else
+ {
+ return NULL;
+ }
+}
+#endif
+
+bool NativeCodeVersion::operator==(const NativeCodeVersion & rhs) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return (rhs.m_storageKind == StorageKind::Explicit) &&
+ (rhs.AsNode() == AsNode());
+ }
+ else if (m_storageKind == StorageKind::Synthetic)
+ {
+ return (rhs.m_storageKind == StorageKind::Synthetic) &&
+ (m_synthetic.m_pMethodDesc == rhs.m_synthetic.m_pMethodDesc);
+ }
+ else
+ {
+ return rhs.m_storageKind == StorageKind::Unknown;
+ }
+}
+bool NativeCodeVersion::operator!=(const NativeCodeVersion & rhs) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return !operator==(rhs);
+}
+
+NativeCodeVersionCollection::NativeCodeVersionCollection(PTR_MethodDesc pMethodDescFilter, ILCodeVersion ilCodeFilter) :
+ m_pMethodDescFilter(pMethodDescFilter),
+ m_ilCodeFilter(ilCodeFilter)
+{
+}
+
+NativeCodeVersionIterator NativeCodeVersionCollection::Begin()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NativeCodeVersionIterator(this);
+}
+NativeCodeVersionIterator NativeCodeVersionCollection::End()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NativeCodeVersionIterator(NULL);
+}
+
+NativeCodeVersionIterator::NativeCodeVersionIterator(NativeCodeVersionCollection* pNativeCodeVersionCollection) :
+ m_stage(IterationStage::Initial),
+ m_pCollection(pNativeCodeVersionCollection),
+ m_pLinkedListCur(dac_cast<PTR_NativeCodeVersionNode>(nullptr))
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ First();
+}
+void NativeCodeVersionIterator::First()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_pCollection == NULL)
+ {
+ m_stage = IterationStage::End;
+ }
+ Next();
+}
+void NativeCodeVersionIterator::Next()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_stage == IterationStage::Initial)
+ {
+ ILCodeVersion ilCodeFilter = m_pCollection->m_ilCodeFilter;
+ m_stage = IterationStage::ImplicitCodeVersion;
+ if (ilCodeFilter.IsNull() || ilCodeFilter.IsDefaultVersion())
+ {
+ m_cur = NativeCodeVersion(m_pCollection->m_pMethodDescFilter);
+ return;
+ }
+ }
+ if (m_stage == IterationStage::ImplicitCodeVersion)
+ {
+ m_stage = IterationStage::LinkedList;
+ CodeVersionManager* pCodeVersionManager = m_pCollection->m_pMethodDescFilter->GetCodeVersionManager();
+ MethodDescVersioningState* pMethodDescVersioningState = pCodeVersionManager->GetMethodDescVersioningState(m_pCollection->m_pMethodDescFilter);
+ if (pMethodDescVersioningState == NULL)
+ {
+ m_pLinkedListCur = NULL;
+ }
+ else
+ {
+ ILCodeVersion ilCodeFilter = m_pCollection->m_ilCodeFilter;
+ m_pLinkedListCur = pMethodDescVersioningState->GetFirstVersionNode();
+ while (m_pLinkedListCur != NULL && !ilCodeFilter.IsNull() && ilCodeFilter.GetVersionId() != m_pLinkedListCur->GetILVersionId())
+ {
+ m_pLinkedListCur = m_pLinkedListCur->m_pNextMethodDescSibling;
+ }
+ }
+ if (m_pLinkedListCur != NULL)
+ {
+ m_cur = NativeCodeVersion(m_pLinkedListCur);
+ return;
+ }
+ }
+ if (m_stage == IterationStage::LinkedList)
+ {
+ if (m_pLinkedListCur != NULL)
+ {
+ ILCodeVersion ilCodeFilter = m_pCollection->m_ilCodeFilter;
+ do
+ {
+ m_pLinkedListCur = m_pLinkedListCur->m_pNextMethodDescSibling;
+ } while (m_pLinkedListCur != NULL && !ilCodeFilter.IsNull() && ilCodeFilter.GetVersionId() != m_pLinkedListCur->GetILVersionId());
+ }
+ if (m_pLinkedListCur != NULL)
+ {
+ m_cur = NativeCodeVersion(m_pLinkedListCur);
+ return;
+ }
+ else
+ {
+ m_stage = IterationStage::End;
+ m_cur = NativeCodeVersion();
+ }
+ }
+}
+const NativeCodeVersion & NativeCodeVersionIterator::Get() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_cur;
+}
+bool NativeCodeVersionIterator::Equal(const NativeCodeVersionIterator &i) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_cur == i.m_cur;
+}
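+
+// Illustrative usage (editorial sketch, not runtime code): callers typically
+// walk the native code versions of a method while holding the code version
+// manager's table lock. pMD stands for a PTR_MethodDesc the caller already
+// holds; everything else below is defined in this file.
+//
+//     CodeVersionManager* pMgr = pMD->GetCodeVersionManager();
+//     CodeVersionManager::TableLockHolder lock(pMgr);
+//     NativeCodeVersionCollection versions = pMgr->GetNativeCodeVersions(pMD);
+//     for (NativeCodeVersionIterator cur = versions.Begin(), end = versions.End(); cur != end; cur++)
+//     {
+//         if (cur->IsActiveChildVersion())
+//         {
+//             PCODE pActiveCode = cur->GetNativeCode(); // the active version's entry point
+//         }
+//     }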
+
+ILCodeVersionNode::ILCodeVersionNode() :
+ m_pModule(dac_cast<PTR_Module>(nullptr)),
+ m_methodDef(0),
+ m_rejitId(0),
+ m_pNextILVersionNode(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
+ m_rejitState(ILCodeVersion::kStateRequested),
+ m_pIL(dac_cast<PTR_COR_ILMETHOD>(nullptr)),
+ m_jitFlags(0)
+{}
+
+#ifndef DACCESS_COMPILE
+ILCodeVersionNode::ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id) :
+ m_pModule(pModule),
+ m_methodDef(methodDef),
+ m_rejitId(id),
+ m_pNextILVersionNode(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
+ m_rejitState(ILCodeVersion::kStateRequested),
+ m_pIL(nullptr),
+ m_jitFlags(0)
+{}
+#endif
+
+#ifdef DEBUG
+BOOL ILCodeVersionNode::LockOwnedByCurrentThread() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return GetModule()->GetCodeVersionManager()->LockOwnedByCurrentThread();
+}
+#endif //DEBUG
+
+PTR_Module ILCodeVersionNode::GetModule() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pModule;
+}
+
+mdMethodDef ILCodeVersionNode::GetMethodDef() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_methodDef;
+}
+
+ReJITID ILCodeVersionNode::GetVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_rejitId;
+}
+
+ILCodeVersion::RejitFlags ILCodeVersionNode::GetRejitState() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_rejitState.Load();
+}
+
+PTR_COR_ILMETHOD ILCodeVersionNode::GetIL() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return dac_cast<PTR_COR_ILMETHOD>(m_pIL.Load());
+}
+
+DWORD ILCodeVersionNode::GetJitFlags() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_jitFlags.Load();
+}
+
+const InstrumentedILOffsetMapping* ILCodeVersionNode::GetInstrumentedILMap() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ return &m_instrumentedILMap;
+}
+
+PTR_ILCodeVersionNode ILCodeVersionNode::GetNextILVersionNode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ return m_pNextILVersionNode;
+}
+
+#ifndef DACCESS_COMPILE
+void ILCodeVersionNode::SetRejitState(ILCodeVersion::RejitFlags newState)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_rejitState.Store(newState);
+}
+
+void ILCodeVersionNode::SetIL(COR_ILMETHOD* pIL)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_pIL.Store(pIL);
+}
+
+void ILCodeVersionNode::SetJitFlags(DWORD flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_jitFlags.Store(flags);
+}
+
+void ILCodeVersionNode::SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ m_instrumentedILMap.SetMappingInfo(cMap, rgMap);
+}
+
+void ILCodeVersionNode::SetNextILVersionNode(ILCodeVersionNode* pNextILVersionNode)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ m_pNextILVersionNode = pNextILVersionNode;
+}
+#endif
+
+ILCodeVersion::ILCodeVersion() :
+ m_storageKind(StorageKind::Unknown)
+{}
+
+ILCodeVersion::ILCodeVersion(const ILCodeVersion & ilCodeVersion) :
+ m_storageKind(ilCodeVersion.m_storageKind)
+{
+ if(m_storageKind == StorageKind::Explicit)
+ {
+ m_pVersionNode = ilCodeVersion.m_pVersionNode;
+ }
+ else if(m_storageKind == StorageKind::Synthetic)
+ {
+ m_synthetic = ilCodeVersion.m_synthetic;
+ }
+}
+
+ILCodeVersion::ILCodeVersion(PTR_ILCodeVersionNode pILCodeVersionNode) :
+ m_storageKind(pILCodeVersionNode != NULL ? StorageKind::Explicit : StorageKind::Unknown),
+ m_pVersionNode(pILCodeVersionNode)
+{}
+
+ILCodeVersion::ILCodeVersion(PTR_Module pModule, mdMethodDef methodDef) :
+ m_storageKind(pModule != NULL ? StorageKind::Synthetic : StorageKind::Unknown)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ m_synthetic.m_pModule = pModule;
+ m_synthetic.m_methodDef = methodDef;
+}
+
+bool ILCodeVersion::operator==(const ILCodeVersion & rhs) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return (rhs.m_storageKind == StorageKind::Explicit) &&
+ (AsNode() == rhs.AsNode());
+ }
+ else if (m_storageKind == StorageKind::Synthetic)
+ {
+ return (rhs.m_storageKind == StorageKind::Synthetic) &&
+ (m_synthetic.m_pModule == rhs.m_synthetic.m_pModule) &&
+ (m_synthetic.m_methodDef == rhs.m_synthetic.m_methodDef);
+ }
+ else
+ {
+ return rhs.m_storageKind == StorageKind::Unknown;
+ }
+}
+
+BOOL ILCodeVersion::IsNull() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_storageKind == StorageKind::Unknown;
+}
+
+BOOL ILCodeVersion::IsDefaultVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_storageKind == StorageKind::Synthetic;
+}
+
+PTR_Module ILCodeVersion::GetModule() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetModule();
+ }
+ else
+ {
+ return m_synthetic.m_pModule;
+ }
+}
+
+mdMethodDef ILCodeVersion::GetMethodDef() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetMethodDef();
+ }
+ else
+ {
+ return m_synthetic.m_methodDef;
+ }
+}
+
+ReJITID ILCodeVersion::GetVersionId() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetVersionId();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+NativeCodeVersionCollection ILCodeVersion::GetNativeCodeVersions(PTR_MethodDesc pClosedMethodDesc) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NativeCodeVersionCollection(pClosedMethodDesc, *this);
+}
+
+NativeCodeVersion ILCodeVersion::GetActiveNativeCodeVersion(PTR_MethodDesc pClosedMethodDesc) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ NativeCodeVersionCollection versions = GetNativeCodeVersions(pClosedMethodDesc);
+ for (NativeCodeVersionIterator cur = versions.Begin(), end = versions.End(); cur != end; cur++)
+ {
+ if (cur->IsActiveChildVersion())
+ {
+ return *cur;
+ }
+ }
+ return NativeCodeVersion();
+}
+
+ILCodeVersion::RejitFlags ILCodeVersion::GetRejitState() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetRejitState();
+ }
+ else
+ {
+ return ILCodeVersion::kStateActive;
+ }
+}
+
+PTR_COR_ILMETHOD ILCodeVersion::GetIL() const
+{
+ CONTRACTL
+ {
+ THROWS; // GetILHeader throws
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+ MODE_ANY;
+ }
+ CONTRACTL_END
+
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetIL();
+ }
+ else
+ {
+ PTR_Module pModule = GetModule();
+ PTR_MethodDesc pMethodDesc = dac_cast<PTR_MethodDesc>(pModule->LookupMethodDef(GetMethodDef()));
+ if (pMethodDesc == NULL)
+ {
+ return NULL;
+ }
+ else
+ {
+ return dac_cast<PTR_COR_ILMETHOD>(pMethodDesc->GetILHeader(TRUE));
+ }
+ }
+}
+
+PTR_COR_ILMETHOD ILCodeVersion::GetILNoThrow() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ PTR_COR_ILMETHOD ret;
+ EX_TRY
+ {
+ ret = GetIL();
+ }
+ EX_CATCH
+ {
+ ret = NULL;
+ }
+ EX_END_CATCH(RethrowTerminalExceptions);
+ return ret;
+}
+
+DWORD ILCodeVersion::GetJitFlags() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetJitFlags();
+ }
+ else
+ {
+ return 0;
+ }
+}
+
+const InstrumentedILOffsetMapping* ILCodeVersion::GetInstrumentedILMap() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_storageKind == StorageKind::Explicit)
+ {
+ return AsNode()->GetInstrumentedILMap();
+ }
+ else
+ {
+ return NULL;
+ }
+}
+
+#ifndef DACCESS_COMPILE
+void ILCodeVersion::SetRejitState(RejitFlags newState)
+{
+ LIMITED_METHOD_CONTRACT;
+ AsNode()->SetRejitState(newState);
+}
+
+void ILCodeVersion::SetIL(COR_ILMETHOD* pIL)
+{
+ LIMITED_METHOD_CONTRACT;
+ AsNode()->SetIL(pIL);
+}
+
+void ILCodeVersion::SetJitFlags(DWORD flags)
+{
+ LIMITED_METHOD_CONTRACT;
+ AsNode()->SetJitFlags(flags);
+}
+
+void ILCodeVersion::SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap)
+{
+ LIMITED_METHOD_CONTRACT;
+ AsNode()->SetInstrumentedILMap(cMap, rgMap);
+}
+
+HRESULT ILCodeVersion::AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+ CodeVersionManager* pManager = GetModule()->GetCodeVersionManager();
+ HRESULT hr = pManager->AddNativeCodeVersion(*this, pClosedMethodDesc, pNativeCodeVersion);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ return S_OK;
+}
+
+HRESULT ILCodeVersion::GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pActiveNativeCodeVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+ NativeCodeVersion activeNativeChild = GetActiveNativeCodeVersion(pClosedMethodDesc);
+ if (activeNativeChild.IsNull())
+ {
+ if (FAILED(hr = AddNativeCodeVersion(pClosedMethodDesc, &activeNativeChild)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+ // The first added child should automatically become active
+ _ASSERTE(GetActiveNativeCodeVersion(pClosedMethodDesc) == activeNativeChild);
+ *pActiveNativeCodeVersion = activeNativeChild;
+ return S_OK;
+}
+
+HRESULT ILCodeVersion::SetActiveNativeCodeVersion(NativeCodeVersion activeNativeCodeVersion, BOOL fEESuspended)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+ MethodDesc* pMethodDesc = activeNativeCodeVersion.GetMethodDesc();
+ NativeCodeVersion prevActiveVersion = GetActiveNativeCodeVersion(pMethodDesc);
+ if (prevActiveVersion == activeNativeCodeVersion)
+ {
+ // Nothing to do; this version is already active
+ return S_OK;
+ }
+
+ if (!prevActiveVersion.IsNull())
+ {
+ prevActiveVersion.SetActiveChildFlag(FALSE);
+ }
+ activeNativeCodeVersion.SetActiveChildFlag(TRUE);
+
+ // If needed update the published code body for this method
+ CodeVersionManager* pCodeVersionManager = GetModule()->GetCodeVersionManager();
+ if (pCodeVersionManager->GetActiveILCodeVersion(GetModule(), GetMethodDef()) == *this)
+ {
+ if (FAILED(hr = pCodeVersionManager->PublishNativeCodeVersion(pMethodDesc, activeNativeCodeVersion, fEESuspended)))
+ {
+ return hr;
+ }
+ }
+
+ return S_OK;
+}
+
+ILCodeVersionNode* ILCodeVersion::AsNode()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pVersionNode;
+}
+#endif //DACCESS_COMPILE
+
+PTR_ILCodeVersionNode ILCodeVersion::AsNode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pVersionNode;
+}
+
+ILCodeVersionCollection::ILCodeVersionCollection(PTR_Module pModule, mdMethodDef methodDef) :
+ m_pModule(pModule),
+ m_methodDef(methodDef)
+{}
+
+ILCodeVersionIterator ILCodeVersionCollection::Begin()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return ILCodeVersionIterator(this);
+}
+
+ILCodeVersionIterator ILCodeVersionCollection::End()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return ILCodeVersionIterator(NULL);
+}
+
+ILCodeVersionIterator::ILCodeVersionIterator(const ILCodeVersionIterator & iter) :
+ m_stage(iter.m_stage),
+ m_cur(iter.m_cur),
+ m_pLinkedListCur(iter.m_pLinkedListCur),
+ m_pCollection(iter.m_pCollection)
+{}
+
+ILCodeVersionIterator::ILCodeVersionIterator(ILCodeVersionCollection* pCollection) :
+ m_stage(pCollection != NULL ? IterationStage::Initial : IterationStage::End),
+ m_pLinkedListCur(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
+ m_pCollection(pCollection)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ First();
+}
+
+const ILCodeVersion & ILCodeVersionIterator::Get() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_cur;
+}
+
+void ILCodeVersionIterator::First()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ Next();
+}
+
+void ILCodeVersionIterator::Next()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ if (m_stage == IterationStage::Initial)
+ {
+ m_stage = IterationStage::ImplicitCodeVersion;
+ m_cur = ILCodeVersion(m_pCollection->m_pModule, m_pCollection->m_methodDef);
+ return;
+ }
+ if (m_stage == IterationStage::ImplicitCodeVersion)
+ {
+ CodeVersionManager* pCodeVersionManager = m_pCollection->m_pModule->GetCodeVersionManager();
+ _ASSERTE(pCodeVersionManager->LockOwnedByCurrentThread());
+ PTR_ILCodeVersioningState pILCodeVersioningState = pCodeVersionManager->GetILCodeVersioningState(m_pCollection->m_pModule, m_pCollection->m_methodDef);
+ if (pILCodeVersioningState != NULL)
+ {
+ m_pLinkedListCur = pILCodeVersioningState->GetFirstVersionNode();
+ }
+ m_stage = IterationStage::LinkedList;
+ if (m_pLinkedListCur != NULL)
+ {
+ m_cur = ILCodeVersion(m_pLinkedListCur);
+ return;
+ }
+ }
+ if (m_stage == IterationStage::LinkedList)
+ {
+ if (m_pLinkedListCur != NULL)
+ {
+ m_pLinkedListCur = m_pLinkedListCur->GetNextILVersionNode();
+ }
+ if (m_pLinkedListCur != NULL)
+ {
+ m_cur = ILCodeVersion(m_pLinkedListCur);
+ return;
+ }
+ else
+ {
+ m_stage = IterationStage::End;
+ m_cur = ILCodeVersion();
+ return;
+ }
+ }
+}
+
+bool ILCodeVersionIterator::Equal(const ILCodeVersionIterator &i) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_cur == i.m_cur;
+}
+
+MethodDescVersioningState::MethodDescVersioningState(PTR_MethodDesc pMethodDesc) :
+ m_pMethodDesc(pMethodDesc),
+ m_flags(IsDefaultVersionActiveChildFlag),
+ m_nextId(1),
+ m_pFirstVersionNode(dac_cast<PTR_NativeCodeVersionNode>(nullptr))
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef FEATURE_JUMPSTAMP
+ ZeroMemory(m_rgSavedCode, JumpStubSize);
+#endif
+}
+
+PTR_MethodDesc MethodDescVersioningState::GetMethodDesc() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pMethodDesc;
+}
+
+#ifndef DACCESS_COMPILE
+NativeCodeVersionId MethodDescVersioningState::AllocateVersionId()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_nextId++;
+}
+#endif
+
+PTR_NativeCodeVersionNode MethodDescVersioningState::GetFirstVersionNode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pFirstVersionNode;
+}
+
+#ifdef FEATURE_JUMPSTAMP
+MethodDescVersioningState::JumpStampFlags MethodDescVersioningState::GetJumpStampState()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (JumpStampFlags)(m_flags & JumpStampMask);
+}
+
+#ifndef DACCESS_COMPILE
+void MethodDescVersioningState::SetJumpStampState(JumpStampFlags newState)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_flags = (m_flags & ~JumpStampMask) | (BYTE)newState;
+}
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+HRESULT MethodDescVersioningState::SyncJumpStamp(NativeCodeVersion nativeCodeVersion, BOOL fEESuspended)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+ PCODE pCode = nativeCodeVersion.IsNull() ? NULL : nativeCodeVersion.GetNativeCode();
+ MethodDesc* pMethod = GetMethodDesc();
+ _ASSERTE(pMethod->IsVersionable() && pMethod->IsVersionableWithJumpStamp());
+
+ if (!pMethod->HasNativeCode())
+ {
+ // We'll set up the jump-stamp when the default native code is created
+ return S_OK;
+ }
+
+ if (!nativeCodeVersion.IsNull() && nativeCodeVersion.IsDefaultVersion())
+ {
+ return UndoJumpStampNativeCode(fEESuspended);
+ }
+ else
+ {
+ // We don't have new code ready yet; jump-stamp back to the prestub so the
+ // code is generated the next time the method is called
+ if (pCode == NULL)
+ {
+ if (!fEESuspended)
+ {
+ return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+ }
+ return JumpStampNativeCode();
+ }
+ // We do know the new code body; install the jump stamp now
+ else
+ {
+ return UpdateJumpTarget(fEESuspended, pCode);
+ }
+ }
+}
+#endif // DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// Simple, thin abstraction of debugger breakpoint patching. Given an address and a
+// previously procured DebuggerControllerPatch governing the code address, this decides
+// whether the code address is patched. If so, it returns a pointer to the debugger's
+// buffer (of what's "underneath" the int 3 patch); otherwise, it returns the code
+// address itself.
+//
+// Arguments:
+// * pbCode - Code address to return if unpatched
+// * dbgpatch - DebuggerControllerPatch to test
+//
+// Return Value:
+// Either pbCode or the debugger's patch buffer, as per description above.
+//
+// Assumptions:
+// Caller must manually grab (and hold) the ControllerLockHolder and get the
+// DebuggerControllerPatch before calling this helper.
+//
+// Notes:
+// pbCode need not equal the code address governed by dbgpatch, but is always
+// "related" (and sometimes really is equal). For example, this helper may be used
+// when writing a code byte to an internal rejit buffer (e.g., in preparation for an
+// eventual 64-bit interlocked write into the code stream), and thus pbCode would
+// point into the internal rejit buffer whereas dbgpatch governs the corresponding
+// code byte in the live code stream. This function would then be used to determine
+// whether a byte should be written into the internal rejit buffer OR into the
+// debugger controller's breakpoint buffer.
+//
+
+LPBYTE FirstCodeByteAddr(LPBYTE pbCode, DebuggerControllerPatch * dbgpatch)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (dbgpatch != NULL && dbgpatch->IsActivated())
+ {
+ // Debugger has patched the code, so return the address of the buffer
+ return LPBYTE(&(dbgpatch->opcode));
+ }
+
+ // no active patch, just return the direct code address
+ return pbCode;
+}
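+
+// Illustrative read through this abstraction (editorial sketch): copying a
+// single code byte while honoring any debugger patch covering it, exactly the
+// pattern the save loops below use:
+//
+//     BYTE b = *FirstCodeByteAddr(pbCode + i,
+//         DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)(pbCode + i)));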
+
+
+#ifdef _DEBUG
+#ifndef DACCESS_COMPILE
+BOOL MethodDescVersioningState::CodeIsSaved()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ for (size_t i = 0; i < sizeof(m_rgSavedCode); i++)
+ {
+ if (m_rgSavedCode[i] != 0)
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif //DACCESS_COMPILE
+#endif //_DEBUG
+
+//---------------------------------------------------------------------------------------
+//
+// Do the actual work of stamping the top of originally-jitted-code with a jmp that goes
+// to the prestub. This can be called in one of three ways:
+// * Case 1: By RequestReJIT against an already-jitted function, in which case the
+// PCODE may be inferred by the MethodDesc, and our caller will have suspended
+// the EE for us, OR
+// * Case 2: By the prestub worker after jitting the original code of a function
+// (i.e., the "pre-rejit" scenario). In this case, the EE is not suspended. But
+// that's ok, because the PCODE has not yet been published to the MethodDesc, and
+// no thread can be executing inside the originally JITted function yet.
+// * Case 3: At type/method restore time for an NGEN'ed assembly. This is also the pre-rejit
+// scenario because we are guaranteed to do this before the code in the module
+// is executable. EE suspend is not required.
+//
+// Arguments:
+// * pCode - Case 1 (above): will be NULL, and we can infer the PCODE from the
+// MethodDesc; Case 2+3 (above, pre-rejit): will be non-NULL, and we'll need to use
+// this to find the code to stamp on top of.
+//
+// Return Value:
+// * S_OK: Either we successfully did the jmp-stamp, or a racing thread took care of
+// it for us.
+// * Else, HRESULT indicating failure.
+//
+// Assumptions:
+// The caller will have suspended the EE if necessary (case 1), before this is
+// called.
+//
+#ifndef DACCESS_COMPILE
+HRESULT MethodDescVersioningState::JumpStampNativeCode(PCODE pCode /* = NULL */)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ // It may seem dangerous to be stamping jumps over code while a GC is going on,
+ // but we're actually safe. As we assert below, either we're holding the thread
+ // store lock (and thus preventing a GC) OR we're stamping code that has not yet
+ // been published (and will thus not be executed by managed threads or examined
+ // by the GC).
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ PCODE pCodePublished = GetMethodDesc()->GetNativeCode();
+
+ _ASSERTE((pCode != NULL) || (pCodePublished != NULL));
+ _ASSERTE(GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread());
+
+ HRESULT hr = S_OK;
+
+ // We'll jump-stamp over pCode, or if pCode is NULL, jump-stamp over the published
+ // code for this state's MethodDesc.
+ LPBYTE pbCode = (LPBYTE)pCode;
+ if (pbCode == NULL)
+ {
+ // If caller didn't specify a pCode, just use the one that was published after
+ // the original JIT. (A specific pCode would be passed in the pre-rejit case,
+ // to jump-stamp the original code BEFORE the PCODE gets published.)
+ pbCode = (LPBYTE)pCodePublished;
+ }
+ _ASSERTE(pbCode != NULL);
+
+ // The debugging API may also try to write to the very top of this function (though
+ // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
+ // whether we can safely patch the actual code, or instead write to the debugger's
+ // buffer.
+ DebuggerController::ControllerLockHolder lockController;
+
+ if (GetJumpStampState() == JumpStampToPrestub)
+ {
+ // The method has already been jump stamped so nothing left to do
+ _ASSERTE(CodeIsSaved());
+ return S_OK;
+ }
+
+ // Remember what we're stamping our jump on top of, so we can replace it during a
+ // revert.
+ if (GetJumpStampState() == JumpStampNone)
+ {
+ for (int i = 0; i < sizeof(m_rgSavedCode); i++)
+ {
+ m_rgSavedCode[i] = *FirstCodeByteAddr(pbCode + i, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)(pbCode + i)));
+ }
+ }
+
+ EX_TRY
+ {
+ AllocMemTracker amt;
+
+ // This might throw on out-of-memory, so rely on the tracker to clean up
+ Precode * pPrecode = Precode::Allocate(PRECODE_STUB, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator(), &amt);
+ PCODE target = pPrecode->GetEntryPoint();
+
+#if defined(_X86_) || defined(_AMD64_)
+
+ // Normal unpatched code never starts with a jump
+ _ASSERTE(GetJumpStampState() == JumpStampToActiveVersion ||
+ *FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) != X86_INSTR_JMP_REL32);
+
+ INT64 i64OldCode = *(INT64*)pbCode;
+ INT64 i64NewCode = i64OldCode;
+ LPBYTE pbNewValue = (LPBYTE)&i64NewCode;
+ *pbNewValue = X86_INSTR_JMP_REL32;
+ INT32 UNALIGNED * pOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
+ // This will throw for out-of-memory, so don't write anything until
+ // after it succeeds.
+ // This call will leak/cache/reuse the jumpstub.
+ *pOffset = rel32UsingJumpStub(reinterpret_cast<INT32 UNALIGNED *>(pbCode + 1), target, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator());
+
+ // If we have the EE suspended or the code is unpublished there won't be contention on this code
+ hr = UpdateJumpStampHelper(pbCode, i64OldCode, i64NewCode, FALSE);
+ if (FAILED(hr))
+ {
+ ThrowHR(hr);
+ }
+
+ //
+ // No failure point after this!
+ //
+ amt.SuppressRelease();
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+
+ SetJumpStampState(JumpStampToPrestub);
+ }
+ EX_CATCH_HRESULT(hr);
+ _ASSERTE(hr == S_OK || hr == E_OUTOFMEMORY);
+
+ if (SUCCEEDED(hr))
+ {
+ _ASSERTE(GetJumpStampState() == JumpStampToPrestub);
+ _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0
+ }
+
+ return hr;
+}
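+
+// Illustrative layout of the stamped quadword on x86/amd64 (editorial sketch):
+// only the first 5 bytes change to "jmp rel32"; bytes 5-7 keep their original
+// values, which is what allows the whole update to be one 64-bit write:
+//
+//     offset:   0      1  2  3  4      5  6  7
+//     before:  [b0]   [b1 b2 b3 b4]   [b5 b6 b7]   original prolog bytes
+//     after:   [0xE9] [rel32 offset]  [b5 b6 b7]   X86_INSTR_JMP_REL32 + rel32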
+
+
+//---------------------------------------------------------------------------------------
+//
+// After code has been rejitted, this is called to update the jump-stamp to go from
+// pointing to the prestub, to pointing to the newly rejitted code.
+//
+// Arguments:
+// fEESuspended - TRUE if the caller keeps the EE suspended during this call
+// pRejittedCode - jitted code for the updated IL this method should execute
+//
+// Assumptions:
+// This code version manager's table crst should be held by the caller
+//
+// Returns - S_OK if the jump target is updated
+// CORPROF_E_RUNTIME_SUSPEND_REQUIRED if the ee isn't suspended and it
+// will need to be in order to do the update safely
+HRESULT MethodDescVersioningState::UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ }
+ CONTRACTL_END;
+
+ MethodDesc * pMD = GetMethodDesc();
+ _ASSERTE(pMD->GetCodeVersionManager()->LockOwnedByCurrentThread());
+
+ // It isn't safe to overwrite the original method prolog with a jmp because threads might
+ // be at an IP in the middle of the jump stamp already. However converting between different
+ // jump stamps is OK (when done atomically) because this only changes the jmp target, not
+ // instruction boundaries.
+ if (GetJumpStampState() == JumpStampNone && !fEESuspended)
+ {
+ return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+ }
+
+ // Beginning of originally JITted code containing the jmp that we will redirect.
+ BYTE * pbCode = (BYTE*)pMD->GetNativeCode();
+
+ // Remember what we're stamping our jump on top of, so we can replace it during a
+ // revert.
+ if (GetJumpStampState() == JumpStampNone)
+ {
+ for (int i = 0; i < sizeof(m_rgSavedCode); i++)
+ {
+ m_rgSavedCode[i] = *FirstCodeByteAddr(pbCode + i, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)(pbCode + i)));
+ }
+ }
+
+#if defined(_X86_) || defined(_AMD64_)
+
+ HRESULT hr = S_OK;
+ {
+ DebuggerController::ControllerLockHolder lockController;
+
+ // This will throw for out-of-memory, so don't write anything until
+ // after it succeeds.
+ // This call will leak/cache/reuse the jumpstub.
+ INT32 offset = 0;
+ EX_TRY
+ {
+ offset = rel32UsingJumpStub(
+ reinterpret_cast<INT32 UNALIGNED *>(&pbCode[1]), // base of offset
+ pRejittedCode, // target of jump
+ pMD,
+ pMD->GetLoaderAllocator());
+ }
+ EX_CATCH_HRESULT(hr);
+ _ASSERTE(hr == S_OK || hr == E_OUTOFMEMORY);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+ // For validation later, remember what pbCode is right now
+ INT64 i64OldValue = *(INT64 *)pbCode;
+
+ // Assemble the INT64 of the new code bytes to write. Start with what's there now
+ INT64 i64NewValue = i64OldValue;
+ LPBYTE pbNewValue = (LPBYTE)&i64NewValue;
+
+ // First byte becomes a rel32 jmp instruction (if it wasn't already)
+ *pbNewValue = X86_INSTR_JMP_REL32;
+ // Next 4 bytes are the jmp target (offset to jmp stub)
+ INT32 UNALIGNED * pnOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
+ *pnOffset = offset;
+
+ hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
+ _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
+ }
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+
+ // State transition
+ SetJumpStampState(JumpStampToActiveVersion);
+ return S_OK;
+}
+
+
+//---------------------------------------------------------------------------------------
+//
+// Poke the JITted code to satisfy a revert request (or to perform an implicit revert as
+// part of a second, third, etc. rejit request). Reinstates the originally JITted code
+// that had been jump-stamped over to perform a prior rejit.
+//
+// Arguments
+// fEESuspended - TRUE if the caller keeps the EE suspended during this call
+//
+//
+// Return Value:
+// S_OK to indicate the revert succeeded,
+// CORPROF_E_RUNTIME_SUSPEND_REQUIRED to indicate the jumpstamp hasn't been reverted
+// and EE suspension will be needed for success
+// other failure HRESULT indicating what went wrong.
+//
+// Assumptions:
+// Caller must be holding the owning CodeVersionManager's table crst.
+//
+HRESULT MethodDescVersioningState::UndoJumpStampNativeCode(BOOL fEESuspended)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread());
+ if (GetJumpStampState() == JumpStampNone)
+ {
+ return S_OK;
+ }
+
+ _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0
+
+ BYTE * pbCode = (BYTE*)GetMethodDesc()->GetNativeCode();
+ DebuggerController::ControllerLockHolder lockController;
+
+#if defined(_X86_) || defined(_AMD64_)
+ _ASSERTE(m_rgSavedCode[0] != X86_INSTR_JMP_REL32);
+ _ASSERTE(*FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) == X86_INSTR_JMP_REL32);
+#else
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+
+ // For the interlocked compare, remember what pbCode is right now
+ INT64 i64OldValue = *(INT64 *)pbCode;
+ // Assemble the INT64 of the new code bytes to write. Start with what's there now
+ INT64 i64NewValue = i64OldValue;
+ memcpy(LPBYTE(&i64NewValue), m_rgSavedCode, sizeof(m_rgSavedCode));
+ HRESULT hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
+ _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
+ if (hr != S_OK)
+ return hr;
+
+ // Transition state of this MethodDescVersioningState to indicate the MD no longer has any jump stamp
+ SetJumpStampState(JumpStampNone);
+ return S_OK;
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// This is called to modify the jump-stamp area, the first MethodDescVersioningState::JumpStubSize bytes
+// in the method's code.
+//
+// Notes:
+// Callers use this method in a variety of circumstances:
+// a) when the code is unpublished (fContentionPossible == FALSE)
+// b) when the caller has taken the ThreadStoreLock and suspended the EE
+// (fContentionPossible == FALSE)
+// c) when the code is published, the EE isn't suspended, and the jumpstamp
+// area consists of a single 5 byte long jump instruction
+// (fContentionPossible == TRUE)
+// This method will attempt to alter the jump-stamp even if the caller has not prevented
+// contention, but there is no guarantee it will be successful. When the caller has prevented
+// contention, success is assured. Callers may opportunistically try without
+// EE suspension, and then upgrade to EE suspension if the first attempt fails.
+//
+// Assumptions:
+// This code version manager's table crst should be held by the caller, or fContentionPossible==FALSE
+// The debugger patch table lock should be held by the caller
+//
+// Arguments:
+// pbCode - pointer to the code where the jump stamp is placed
+// i64OldValue - the bytes which should currently be at the start of the method code
+// i64NewValue - the new bytes which should be written at the start of the method code
+// fContentionPossible - See the Notes section above.
+//
+// Returns:
+// S_OK => the jumpstamp has been successfully updated.
+// CORPROF_E_RUNTIME_SUSPEND_REQUIRED => the jumpstamp remains unchanged (preventing contention will be necessary)
+// other failing HR => VirtualProtect failed, the jumpstamp remains unchanged
+//
+#ifndef DACCESS_COMPILE
+HRESULT MethodDescVersioningState::UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64NewValue, BOOL fContentionPossible)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ MethodDesc * pMD = GetMethodDesc();
+ _ASSERTE(pMD->GetCodeVersionManager()->LockOwnedByCurrentThread() || !fContentionPossible);
+
+ // When ReJIT is enabled, method entrypoints are always at least 8-byte aligned (see
+ // code:EEJitManager::allocCode), so we can do a single 64-bit interlocked operation
+ // to update the jump target. However, some code may have gotten compiled before
+ // the profiler had a chance to enable ReJIT (e.g., NGENd code, or code JITted
+ // before a profiler attaches). In such cases, we cannot rely on a simple
+ // interlocked operation, and instead must suspend the runtime to ensure we can
+ // safely update the jmp instruction.
+ //
+ // This method doesn't verify that the method is actually safe to rejit, we expect
+ // callers to do that. At the moment NGEN'ed code is safe to rejit even if
+ // it is unaligned, but code generated before the profiler attaches is not.
+ if (fContentionPossible && !(IS_ALIGNED(pbCode, sizeof(INT64))))
+ {
+ return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+ }
+
+ // The debugging API may also try to write to this function (though
+ // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
+ // whether we can safely patch the actual code, or instead write to the debugger's
+ // buffer.
+ if (fContentionPossible)
+ {
+ for (CORDB_ADDRESS_TYPE* pbProbeAddr = pbCode; pbProbeAddr < pbCode + MethodDescVersioningState::JumpStubSize; pbProbeAddr++)
+ {
+ if (NULL != DebuggerController::GetPatchTable()->GetPatch(pbProbeAddr))
+ {
+ return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
+ }
+ }
+ }
+
+#if defined(_X86_) || defined(_AMD64_)
+
+ DWORD oldProt;
+ if (!ClrVirtualProtect((LPVOID)pbCode, 8, PAGE_EXECUTE_READWRITE, &oldProt))
+ {
+ return HRESULT_FROM_WIN32(GetLastError());
+ }
+
+ if (fContentionPossible)
+ {
+ INT64 i64InterlockReportedOldValue = FastInterlockCompareExchangeLong((INT64 *)pbCode, i64NewValue, i64OldValue);
+ // Since changes to these bytes are protected by this manager's m_crstTable, we
+ // shouldn't have two writers conflicting.
+ _ASSERTE(i64InterlockReportedOldValue == i64OldValue);
+ }
+ else
+ {
+ // In this path the caller ensures:
+ // a) no thread will execute through the prologue area we are modifying
+ // b) no thread is stopped in a prologue such that it resumes in the middle of code we are modifying
+ // c) no thread is doing a debugger patch skip operation in which an unmodified copy of the method's
+ // code could be executed from a patch skip buffer.
+
+ // PERF: if we aren't debugging, we might still want a faster path through
+ // here that skips all the patch checks
+ for (int i = 0; i < MethodDescVersioningState::JumpStubSize; i++)
+ {
+ *FirstCodeByteAddr(pbCode + i, DebuggerController::GetPatchTable()->GetPatch(pbCode + i)) = ((BYTE*)&i64NewValue)[i];
+ }
+ }
+
+ if (oldProt != PAGE_EXECUTE_READWRITE)
+ {
+ // The CLR codebase in many locations simply ignores failures to restore the page protections.
+ // It's true that it isn't a problem functionally, but it seems a bit sketchy;
+ // I am following the convention for now.
+ ClrVirtualProtect((LPVOID)pbCode, 8, oldProt, &oldProt);
+ }
+
+ FlushInstructionCache(GetCurrentProcess(), pbCode, MethodDescVersioningState::JumpStubSize);
+ return S_OK;
+
+#else // _X86_ || _AMD64_
+#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
+#endif // _X86_ || _AMD64_
+}
+#endif
+#endif // FEATURE_JUMPSTAMP
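+
+// Illustrative caller pattern for the helpers above (editorial sketch; the
+// SuspendEE/RestartEE calls are an assumption about what a caller such as the
+// rejit code does, not a contract of this file): try opportunistically without
+// suspension, then upgrade to EE suspension if contention is reported:
+//
+//     HRESULT hr = pState->UpdateJumpTarget(FALSE /* fEESuspended */, pCode);
+//     if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
+//     {
+//         ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
+//         hr = pState->UpdateJumpTarget(TRUE /* fEESuspended */, pCode);
+//         ThreadSuspend::RestartEE(FALSE, TRUE);
+//     }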
+
+BOOL MethodDescVersioningState::IsDefaultVersionActiveChild() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_flags & IsDefaultVersionActiveChildFlag) != 0;
+}
+#ifndef DACCESS_COMPILE
+void MethodDescVersioningState::SetDefaultVersionActiveChildFlag(BOOL isActive)
+{
+ LIMITED_METHOD_CONTRACT;
+ if (isActive)
+ {
+ m_flags |= IsDefaultVersionActiveChildFlag;
+ }
+ else
+ {
+ m_flags &= ~IsDefaultVersionActiveChildFlag;
+ }
+}
+
+void MethodDescVersioningState::LinkNativeCodeVersionNode(NativeCodeVersionNode* pNativeCodeVersionNode)
+{
+ LIMITED_METHOD_CONTRACT;
+ pNativeCodeVersionNode->m_pNextMethodDescSibling = m_pFirstVersionNode;
+ m_pFirstVersionNode = pNativeCodeVersionNode;
+}
+#endif
+
+ILCodeVersioningState::ILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef) :
+ m_activeVersion(ILCodeVersion(pModule,methodDef)),
+ m_pFirstVersionNode(dac_cast<PTR_ILCodeVersionNode>(nullptr)),
+ m_pModule(pModule),
+ m_methodDef(methodDef)
+{}
+
+
+ILCodeVersioningState::Key::Key() :
+ m_pModule(dac_cast<PTR_Module>(nullptr)),
+ m_methodDef(0)
+{}
+
+ILCodeVersioningState::Key::Key(PTR_Module pModule, mdMethodDef methodDef) :
+ m_pModule(pModule),
+ m_methodDef(methodDef)
+{}
+
+size_t ILCodeVersioningState::Key::Hash() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (size_t)(dac_cast<TADDR>(m_pModule) ^ m_methodDef);
+}
+
+bool ILCodeVersioningState::Key::operator==(const Key & rhs) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_pModule == rhs.m_pModule) && (m_methodDef == rhs.m_methodDef);
+}
+
+ILCodeVersioningState::Key ILCodeVersioningState::GetKey() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return Key(m_pModule, m_methodDef);
+}
+
+ILCodeVersion ILCodeVersioningState::GetActiveVersion() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_activeVersion;
+}
+
+PTR_ILCodeVersionNode ILCodeVersioningState::GetFirstVersionNode() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_pFirstVersionNode;
+}
+
+#ifndef DACCESS_COMPILE
+void ILCodeVersioningState::SetActiveVersion(ILCodeVersion ilActiveCodeVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+ m_activeVersion = ilActiveCodeVersion;
+}
+
+void ILCodeVersioningState::LinkILCodeVersionNode(ILCodeVersionNode* pILCodeVersionNode)
+{
+ LIMITED_METHOD_CONTRACT;
+ pILCodeVersionNode->SetNextILVersionNode(m_pFirstVersionNode);
+ m_pFirstVersionNode = pILCodeVersionNode;
+}
+#endif
+
+CodeVersionManager::CodeVersionManager()
+{}
+
+//---------------------------------------------------------------------------------------
+//
+// Called from BaseDomain::BaseDomain to do any constructor-time initialization.
+// Presently, this takes care of initializing the Crst, choosing the type based on
+// whether this CodeVersionManager belongs to the SharedDomain.
+//
+// Arguments:
+// * fSharedDomain - nonzero iff this CodeVersionManager belongs to the SharedDomain.
+//
+
+void CodeVersionManager::PreInit(BOOL fSharedDomain)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifndef DACCESS_COMPILE
+ m_crstTable.Init(
+ fSharedDomain ? CrstReJITSharedDomainTable : CrstReJITDomainTable,
+ CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
+#endif // DACCESS_COMPILE
+}
+
+CodeVersionManager::TableLockHolder::TableLockHolder(CodeVersionManager* pCodeVersionManager) :
+ CrstHolder(&pCodeVersionManager->m_crstTable)
+{
+}
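+
+// Illustrative RAII usage (editorial sketch): the holder must be a named local
+// so the lock is held for the whole scope. Note that a statement like
+// "TableLockHolder(this);" constructs and immediately destroys a temporary,
+// releasing the lock right away:
+//
+//     {
+//         CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+//         // operations requiring the table lock go here
+//     }
+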
+#ifndef DACCESS_COMPILE
+void CodeVersionManager::EnterLock()
+{
+ m_crstTable.Enter();
+}
+void CodeVersionManager::LeaveLock()
+{
+ m_crstTable.Leave();
+}
+#endif
+
+#ifdef DEBUG
+BOOL CodeVersionManager::LockOwnedByCurrentThread() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+#ifdef DACCESS_COMPILE
+ return TRUE;
+#else
+ return const_cast<CrstExplicitInit &>(m_crstTable).OwnedByCurrentThread();
+#endif
+}
+#endif
+
+PTR_ILCodeVersioningState CodeVersionManager::GetILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ ILCodeVersioningState::Key key = ILCodeVersioningState::Key(pModule, methodDef);
+ return m_ilCodeVersioningStateMap.Lookup(key);
+}
+
+PTR_MethodDescVersioningState CodeVersionManager::GetMethodDescVersioningState(PTR_MethodDesc pClosedMethodDesc) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ return m_methodDescVersioningStateMap.Lookup(pClosedMethodDesc);
+}
+
+#ifndef DACCESS_COMPILE
+HRESULT CodeVersionManager::GetOrCreateILCodeVersioningState(Module* pModule, mdMethodDef methodDef, ILCodeVersioningState** ppILCodeVersioningState)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+ ILCodeVersioningState* pILCodeVersioningState = GetILCodeVersioningState(pModule, methodDef);
+ if (pILCodeVersioningState == NULL)
+ {
+ pILCodeVersioningState = new (nothrow) ILCodeVersioningState(pModule, methodDef);
+ if (pILCodeVersioningState == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ EX_TRY
+ {
+ // This throws when out of memory, but remains internally
+ // consistent (without adding the new element)
+ m_ilCodeVersioningStateMap.Add(pILCodeVersioningState);
+ }
+ EX_CATCH_HRESULT(hr);
+ if (FAILED(hr))
+ {
+ delete pILCodeVersioningState;
+ return hr;
+ }
+ }
+ *ppILCodeVersioningState = pILCodeVersioningState;
+ return S_OK;
+}
+
+HRESULT CodeVersionManager::GetOrCreateMethodDescVersioningState(MethodDesc* pMethod, MethodDescVersioningState** ppMethodVersioningState)
+{
+ LIMITED_METHOD_CONTRACT;
+ HRESULT hr = S_OK;
+ MethodDescVersioningState* pMethodVersioningState = m_methodDescVersioningStateMap.Lookup(pMethod);
+ if (pMethodVersioningState == NULL)
+ {
+ pMethodVersioningState = new (nothrow) MethodDescVersioningState(pMethod);
+ if (pMethodVersioningState == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ EX_TRY
+ {
+ // This throws when out of memory, but remains internally
+ // consistent (without adding the new element)
+ m_methodDescVersioningStateMap.Add(pMethodVersioningState);
+ }
+ EX_CATCH_HRESULT(hr);
+ if (FAILED(hr))
+ {
+ delete pMethodVersioningState;
+ return hr;
+ }
+ }
+ *ppMethodVersioningState = pMethodVersioningState;
+ return S_OK;
+}
+#endif // DACCESS_COMPILE
+
+DWORD CodeVersionManager::GetNonDefaultILVersionCount()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // This function is legal to call WITHOUT taking the lock.
+ // It is used to quickly check whether work might be needed, without paying the
+ // overhead of acquiring the lock and doing dictionary lookups.
+ return m_ilCodeVersioningStateMap.GetCount();
+}
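+
+// Illustrative fast path (editorial sketch): a hot-path caller can skip the
+// versioning machinery entirely when nothing has ever been versioned, without
+// paying for the table lock:
+//
+//     if (pCodeVersionManager->GetNonDefaultILVersionCount() == 0)
+//     {
+//         // no rejit/versioning requests exist; the default code is active
+//     }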
+
+ILCodeVersionCollection CodeVersionManager::GetILCodeVersions(PTR_MethodDesc pMethod)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ return GetILCodeVersions(dac_cast<PTR_Module>(pMethod->GetModule()), pMethod->GetMemberDef());
+}
+
+ILCodeVersionCollection CodeVersionManager::GetILCodeVersions(PTR_Module pModule, mdMethodDef methodDef)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ return ILCodeVersionCollection(pModule, methodDef);
+}
+
+ILCodeVersion CodeVersionManager::GetActiveILCodeVersion(PTR_MethodDesc pMethod)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ return GetActiveILCodeVersion(dac_cast<PTR_Module>(pMethod->GetModule()), pMethod->GetMemberDef());
+}
+
+ILCodeVersion CodeVersionManager::GetActiveILCodeVersion(PTR_Module pModule, mdMethodDef methodDef)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ ILCodeVersioningState* pILCodeVersioningState = GetILCodeVersioningState(pModule, methodDef);
+ if (pILCodeVersioningState == NULL)
+ {
+ return ILCodeVersion(pModule, methodDef);
+ }
+ else
+ {
+ return pILCodeVersioningState->GetActiveVersion();
+ }
+}
+
+ILCodeVersion CodeVersionManager::GetILCodeVersion(PTR_MethodDesc pMethod, ReJITID rejitId)
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+
+#ifdef FEATURE_REJIT
+ ILCodeVersionCollection collection = GetILCodeVersions(pMethod);
+ for (ILCodeVersionIterator cur = collection.Begin(), end = collection.End(); cur != end; cur++)
+ {
+ if (cur->GetVersionId() == rejitId)
+ {
+ return *cur;
+ }
+ }
+ return ILCodeVersion();
+#else // FEATURE_REJIT
+ _ASSERTE(rejitId == 0);
+ return ILCodeVersion(dac_cast<PTR_Module>(pMethod->GetModule()), pMethod->GetMemberDef());
+#endif // FEATURE_REJIT
+}
+
+NativeCodeVersionCollection CodeVersionManager::GetNativeCodeVersions(PTR_MethodDesc pMethod) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ return NativeCodeVersionCollection(pMethod, ILCodeVersion());
+}
+
+NativeCodeVersion CodeVersionManager::GetNativeCodeVersion(PTR_MethodDesc pMethod, PCODE codeStartAddress) const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+
+ NativeCodeVersionCollection nativeCodeVersions = GetNativeCodeVersions(pMethod);
+ for (NativeCodeVersionIterator cur = nativeCodeVersions.Begin(), end = nativeCodeVersions.End(); cur != end; cur++)
+ {
+ if (cur->GetNativeCode() == codeStartAddress)
+ {
+ return *cur;
+ }
+ }
+ return NativeCodeVersion();
+}
+
+#ifndef DACCESS_COMPILE
+HRESULT CodeVersionManager::AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+
+ ILCodeVersioningState* pILCodeVersioningState;
+ HRESULT hr = GetOrCreateILCodeVersioningState(pModule, methodDef, &pILCodeVersioningState);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+
+ ILCodeVersionNode* pILCodeVersionNode = new (nothrow) ILCodeVersionNode(pModule, methodDef, rejitId);
+ if (pILCodeVersionNode == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ pILCodeVersioningState->LinkILCodeVersionNode(pILCodeVersionNode);
+ *pILCodeVersion = ILCodeVersion(pILCodeVersionNode);
+ return S_OK;
+}
+
+HRESULT CodeVersionManager::SetActiveILCodeVersions(ILCodeVersion* pActiveVersions, DWORD cActiveVersions, BOOL fEESuspended, CDynArray<CodePublishError> * pErrors)
+{
+ // If the IL version is in the shared domain we need to iterate all domains
+ // looking for instantiations. The domain iterator lock is bigger than
+ // the code version manager lock, so we can't do this atomically. Instead, in
+ // one atomic update the bookkeeping for IL versioning happens, and then in a
+ // second update the active native code versions change (the code jumpstamps
+ // and precodes are updated).
+ //
+ // Note: For all domains other than the shared AppDomain we could do this
+ // atomically, but for now we use the lowest common denominator for all
+ // domains.
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pActiveVersions));
+ PRECONDITION(CheckPointer(pErrors, NULL_OK));
+ }
+ CONTRACTL_END;
+ _ASSERTE(!LockOwnedByCurrentThread());
+ HRESULT hr = S_OK;
+
+#if DEBUG
+ for (DWORD i = 0; i < cActiveVersions; i++)
+ {
+ ILCodeVersion activeVersion = pActiveVersions[i];
+ if (activeVersion.IsNull())
+ {
+ _ASSERTE(!"The active IL version can't be NULL");
+ }
+ }
+#endif
+
+ // step 1 - mark the IL versions as being active, this ensures that
+ // any new method instantiations added after this point will bind to
+ // the correct version
+ {
+ TableLockHolder lock(this);
+ for (DWORD i = 0; i < cActiveVersions; i++)
+ {
+ ILCodeVersion activeVersion = pActiveVersions[i];
+ ILCodeVersioningState* pILCodeVersioningState = NULL;
+ if (FAILED(hr = GetOrCreateILCodeVersioningState(activeVersion.GetModule(), activeVersion.GetMethodDef(), &pILCodeVersioningState)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ pILCodeVersioningState->SetActiveVersion(activeVersion);
+ }
+ }
+
+ // step 2 - determine the set of pre-existing method instantiations
+
+ // a parallel array to activeVersions
+ // for each ILCodeVersion in activeVersions, this lists the set
+ // MethodDescs that will need to be updated
+ CDynArray<CDynArray<MethodDesc*>> methodDescsToUpdate;
+ CDynArray<CodePublishError> errorRecords;
+ for (DWORD i = 0; i < cActiveVersions; i++)
+ {
+ CDynArray<MethodDesc*>* pMethodDescs = methodDescsToUpdate.Append();
+ if (pMethodDescs == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ *pMethodDescs = CDynArray<MethodDesc*>();
+
+ MethodDesc* pLoadedMethodDesc = pActiveVersions[i].GetModule()->LookupMethodDef(pActiveVersions[i].GetMethodDef());
+ if (FAILED(hr = CodeVersionManager::EnumerateClosedMethodDescs(pLoadedMethodDesc, pMethodDescs, &errorRecords)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+
+ // step 3 - update each pre-existing method instantiation
+ {
+ TableLockHolder lock(this);
+ for (DWORD i = 0; i < cActiveVersions; i++)
+ {
+ // It's possible the active IL version has changed if
+ // another caller made an update while this method wasn't
+ // holding the lock. We will ensure that we synchronize
+ // publishing to whatever version is currently active, even
+ // if that isn't the IL version we set above.
+ //
+ // Note: Although we attempt to handle this case gracefully
+ // it isn't recommended for callers to do this. Racing two calls
+ // that set the IL version to different results means it will be
+ // completely arbitrary which version wins.
+ ILCodeVersion requestedActiveILVersion = pActiveVersions[i];
+ ILCodeVersion activeILVersion = GetActiveILCodeVersion(requestedActiveILVersion.GetModule(), requestedActiveILVersion.GetMethodDef());
+
+ CDynArray<MethodDesc*> methodDescs = methodDescsToUpdate[i];
+ for (int j = 0; j < methodDescs.Count(); j++)
+ {
+ // Get the active child code version for this method instantiation (it might be NULL, that is OK)
+ NativeCodeVersion activeNativeChild = activeILVersion.GetActiveNativeCodeVersion(methodDescs[j]);
+
+ // Publish that child version, because it is the active native child of the active IL version
+ // Failing to publish is non-fatal, but we do record it so the caller is aware
+ if (FAILED(hr = PublishNativeCodeVersion(methodDescs[j], activeNativeChild, fEESuspended)))
+ {
+ if (FAILED(hr = AddCodePublishError(activeILVersion.GetModule(), activeILVersion.GetMethodDef(), methodDescs[j], hr, &errorRecords)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+ }
+ }
+ }
+
+ return S_OK;
+}
+
+HRESULT CodeVersionManager::AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+
+ MethodDescVersioningState* pMethodVersioningState;
+ HRESULT hr = GetOrCreateMethodDescVersioningState(pClosedMethodDesc, &pMethodVersioningState);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+
+ NativeCodeVersionId newId = pMethodVersioningState->AllocateVersionId();
+ NativeCodeVersionNode* pNativeCodeVersionNode = new (nothrow) NativeCodeVersionNode(newId, pClosedMethodDesc, ilCodeVersion.GetVersionId());
+ if (pNativeCodeVersionNode == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+
+ pMethodVersioningState->LinkNativeCodeVersionNode(pNativeCodeVersionNode);
+
+ // the first child added is automatically considered the active one.
+ if (ilCodeVersion.GetActiveNativeCodeVersion(pClosedMethodDesc).IsNull())
+ {
+ pNativeCodeVersionNode->SetActiveChildFlag(TRUE);
+ _ASSERTE(!ilCodeVersion.GetActiveNativeCodeVersion(pClosedMethodDesc).IsNull());
+
+ // the new child shouldn't have any native code. If it did we might need to
+ // publish that code as part of adding the node which would require callers
+ // to pay attention to GC suspension and we'd need to report publishing errors
+ // back to them.
+ _ASSERTE(pNativeCodeVersionNode->GetNativeCode() == NULL);
+ }
+ *pNativeCodeVersion = NativeCodeVersion(pNativeCodeVersionNode);
+ return S_OK;
+}
+
+PCODE CodeVersionManager::PublishVersionableCodeIfNecessary(MethodDesc* pMethodDesc, BOOL fCanBackpatchPrestub)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(!LockOwnedByCurrentThread());
+ _ASSERTE(pMethodDesc->IsVersionable());
+ _ASSERTE(!pMethodDesc->IsPointingToPrestub() || !pMethodDesc->IsVersionableWithJumpStamp());
+
+ HRESULT hr = S_OK;
+ PCODE pCode = NULL;
+ BOOL fIsJumpStampMethod = pMethodDesc->IsVersionableWithJumpStamp();
+
+ NativeCodeVersion activeVersion;
+ {
+ TableLockHolder lock(this);
+ if (FAILED(hr = GetActiveILCodeVersion(pMethodDesc).GetOrCreateActiveNativeCodeVersion(pMethodDesc, &activeVersion)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ ReportCodePublishError(pMethodDesc->GetModule(), pMethodDesc->GetMemberDef(), pMethodDesc, hr);
+ return NULL;
+ }
+ }
+
+ BOOL fEESuspend = FALSE;
+ while (true)
+ {
+ // compile the code if needed
+ pCode = activeVersion.GetNativeCode();
+ if (pCode == NULL)
+ {
+ pCode = pMethodDesc->PrepareCode(activeVersion);
+ }
+
+ // suspend in preparation for publishing if needed
+ if (fEESuspend)
+ {
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
+ }
+
+ {
+ TableLockHolder lock(this);
+ // The common case is that newActiveVersion == activeVersion, however we did leave the lock so there is
+ // a possibility that the active version has changed. If it has, we need to restart the compilation
+ // and publishing process with the new active version instead.
+ //
+ // In theory it should be legitimate to break out of this loop and run the less recent active version,
+ // because ultimately this is a race between one thread that is updating the version and another thread
+ // trying to run the current version. However for back-compat with ReJIT we need to guarantee that
+ // a versioning update at least as late as the profiler JitCompilationFinished callback wins the race.
+ NativeCodeVersion newActiveVersion;
+ if (FAILED(hr = GetActiveILCodeVersion(pMethodDesc).GetOrCreateActiveNativeCodeVersion(pMethodDesc, &newActiveVersion)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ ReportCodePublishError(pMethodDesc->GetModule(), pMethodDesc->GetMemberDef(), pMethodDesc, hr);
+ pCode = NULL;
+ break;
+ }
+ if (newActiveVersion != activeVersion)
+ {
+ activeVersion = newActiveVersion;
+ }
+ else
+ {
+ // if we aren't allowed to backpatch we are done
+ if (!fCanBackpatchPrestub)
+ {
+ break;
+ }
+
+ // attempt to publish the active version still under the lock
+ if (FAILED(hr = PublishNativeCodeVersion(pMethodDesc, activeVersion, fEESuspend)))
+ {
+ // if we need an EESuspend to publish then start over. We have to leave the lock in order to suspend,
+ // and when we leave the lock the active version might change again. However now we know that a suspend
+ // is required, so the next iteration will re-attempt the publish with the runtime suspended.
+ if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
+ {
+ _ASSERTE(!fEESuspend);
+ fEESuspend = true;
+ // the EE was not suspended on this iteration (see the assert above), so
+ // skip the RestartEE at the bottom of the loop and retry the publish
+ continue;
+ }
+ else
+ {
+ ReportCodePublishError(pMethodDesc->GetModule(), pMethodDesc->GetMemberDef(), pMethodDesc, hr);
+ pCode = NULL;
+ break;
+ }
+ }
+ else
+ {
+ //success
+ break;
+ }
+ }
+ } // exit lock
+
+ if (fEESuspend)
+ {
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+ }
+
+ // if we broke out of the loop while the EE was still suspended, resume it now
+ if (fEESuspend)
+ {
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+ return pCode;
+}
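+
+// In outline, the publish loop above is (a sketch that mirrors the code, not
+// additional behavior):
+//
+//   for (;;) {
+//       pCode = active.GetNativeCode(), or PrepareCode(active) if none yet;
+//       if (fEESuspend) SuspendEE();
+//       lock {
+//           if (active version changed)      adopt it and loop again;
+//           if (!fCanBackpatchPrestub)       break;
+//           if (publish needs a suspend)     { fEESuspend = true; continue; }
+//           break;                           // success or reported error
+//       }
+//       if (fEESuspend) RestartEE();         // version changed while suspended
+//   }
+//   if (fEESuspend) RestartEE();             // we broke out while suspended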
+
+HRESULT CodeVersionManager::PublishNativeCodeVersion(MethodDesc* pMethod, NativeCodeVersion nativeCodeVersion, BOOL fEESuspended)
+{
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(LockOwnedByCurrentThread());
+ _ASSERTE(pMethod->IsVersionable());
+ HRESULT hr = S_OK;
+ PCODE pCode = nativeCodeVersion.IsNull() ? NULL : nativeCodeVersion.GetNativeCode();
+ if (pMethod->IsVersionableWithPrecode())
+ {
+ Precode* pPrecode = pMethod->GetOrCreatePrecode();
+ if (pCode == NULL)
+ {
+ EX_TRY
+ {
+ pPrecode->Reset();
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+ }
+ else
+ {
+ EX_TRY
+ {
+ hr = pPrecode->SetTargetInterlocked(pCode, FALSE) ? S_OK : E_FAIL;
+ }
+ EX_CATCH_HRESULT(hr);
+ return hr;
+ }
+ }
+ else
+ {
+#ifndef FEATURE_JUMPSTAMP
+ _ASSERTE(!"This platform doesn't support JumpStamp but this method doesn't version with Precode,"
+ " this method can't be updated");
+ return E_FAIL;
+#else
+ MethodDescVersioningState* pVersioningState;
+ if (FAILED(hr = GetOrCreateMethodDescVersioningState(pMethod, &pVersioningState)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ return pVersioningState->SyncJumpStamp(nativeCodeVersion, fEESuspended);
+#endif
+ }
+}
+
+// static
+HRESULT CodeVersionManager::EnumerateClosedMethodDescs(
+ MethodDesc* pMD,
+ CDynArray<MethodDesc*> * pClosedMethodDescs,
+ CDynArray<CodePublishError> * pUnsupportedMethodErrors)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pMD, NULL_OK));
+ PRECONDITION(CheckPointer(pClosedMethodDescs));
+ PRECONDITION(CheckPointer(pUnsupportedMethodErrors));
+ }
+ CONTRACTL_END;
+ HRESULT hr = S_OK;
+ if (pMD == NULL)
+ {
+ // nothing is loaded yet so we're done for this method.
+ return S_OK;
+ }
+
+ if (!pMD->HasClassOrMethodInstantiation())
+ {
+ // We have a JITted non-generic; record it and we're done for this method.
+ MethodDesc ** ppMD = pClosedMethodDescs->Append();
+ if (ppMD == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ *ppMD = pMD;
+ return S_OK;
+ }
+
+ // Ok, now the case of a generic function (or function on generic class), which
+ // is loaded, and may thus have compiled instantiations.
+ // It's impossible to get to any other kind of domain from the profiling API
+ Module* pModule = pMD->GetModule();
+ mdMethodDef methodDef = pMD->GetMemberDef();
+ BaseDomain * pBaseDomainFromModule = pModule->GetDomain();
+ _ASSERTE(pBaseDomainFromModule->IsAppDomain() ||
+ pBaseDomainFromModule->IsSharedDomain());
+
+ if (pBaseDomainFromModule->IsSharedDomain())
+ {
+ // Iterate through all modules loaded into the shared domain, to
+ // find all instantiations living in the shared domain. This will
+ // include orphaned code (i.e., shared code used by ADs that have
+ // all unloaded), which is good, because orphaned code could get
+ // re-adopted if a new AD is created that can use that shared code
+ hr = EnumerateDomainClosedMethodDescs(
+ NULL, // NULL means to search SharedDomain instead of an AD
+ pModule,
+ methodDef,
+ pClosedMethodDescs,
+ pUnsupportedMethodErrors);
+ }
+ else
+ {
+ // Module is unshared, so just use the module's domain to find instantiations.
+ hr = EnumerateDomainClosedMethodDescs(
+ pBaseDomainFromModule->AsAppDomain(),
+ pModule,
+ methodDef,
+ pClosedMethodDescs,
+ pUnsupportedMethodErrors);
+ }
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+
+ // We want to iterate through all compilations of existing instantiations to
+ // ensure they get marked for rejit. Note: There may be zero instantiations,
+ // but we won't know until we try.
+ if (pBaseDomainFromModule->IsSharedDomain())
+ {
+ // Iterate through all real domains, to find shared instantiations.
+ AppDomainIterator appDomainIterator(TRUE);
+ while (appDomainIterator.Next())
+ {
+ AppDomain * pAppDomain = appDomainIterator.GetDomain();
+ if (pAppDomain->IsUnloading())
+ {
+ continue;
+ }
+ hr = EnumerateDomainClosedMethodDescs(
+ pAppDomain,
+ pModule,
+ methodDef,
+ pClosedMethodDescs,
+ pUnsupportedMethodErrors);
+ if (FAILED(hr))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+ }
+ return S_OK;
+}
+
+// static
+HRESULT CodeVersionManager::EnumerateDomainClosedMethodDescs(
+ AppDomain * pAppDomainToSearch,
+ Module* pModuleContainingMethodDef,
+ mdMethodDef methodDef,
+ CDynArray<MethodDesc*> * pClosedMethodDescs,
+ CDynArray<CodePublishError> * pUnsupportedMethodErrors)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pAppDomainToSearch, NULL_OK));
+ PRECONDITION(CheckPointer(pModuleContainingMethodDef));
+ PRECONDITION(CheckPointer(pClosedMethodDescs));
+ PRECONDITION(CheckPointer(pUnsupportedMethodErrors));
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(methodDef != mdTokenNil);
+
+ HRESULT hr;
+
+ BaseDomain * pDomainContainingGenericDefinition = pModuleContainingMethodDef->GetDomain();
+
+#ifdef _DEBUG
+ // If the generic definition is not loaded domain-neutral, then all its
+ // instantiations will also be non-domain-neutral and loaded into the same
+ // domain as the generic definition. So the caller may only pass the
+ // domain containing the generic definition as pAppDomainToSearch
+ if (!pDomainContainingGenericDefinition->IsSharedDomain())
+ {
+ _ASSERTE(pDomainContainingGenericDefinition == pAppDomainToSearch);
+ }
+#endif //_DEBUG
+
+ // If pAppDomainToSearch is NULL, iterate through all existing
+ // instantiations loaded into the SharedDomain. If pAppDomainToSearch is non-NULL,
+ // iterate through all existing instantiations in pAppDomainToSearch, and only consider
+ // instantiations in non-domain-neutral assemblies (as we already covered domain
+ // neutral assemblies when we searched the SharedDomain).
+ LoadedMethodDescIterator::AssemblyIterationMode mode = LoadedMethodDescIterator::kModeSharedDomainAssemblies;
+ // these are the default flags which won't actually be used in shared mode other than
+ // asserting they were specified with their default values
+ AssemblyIterationFlags assemFlags = (AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution);
+ ModuleIterationOption moduleFlags = (ModuleIterationOption)kModIterIncludeLoaded;
+ if (pAppDomainToSearch != NULL)
+ {
+ mode = LoadedMethodDescIterator::kModeUnsharedADAssemblies;
+ assemFlags = (AssemblyIterationFlags)(kIncludeAvailableToProfilers | kIncludeExecution);
+ moduleFlags = (ModuleIterationOption)kModIterIncludeAvailableToProfilers;
+ }
+ LoadedMethodDescIterator it(
+ pAppDomainToSearch,
+ pModuleContainingMethodDef,
+ methodDef,
+ mode,
+ assemFlags,
+ moduleFlags);
+ CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
+ while (it.Next(pDomainAssembly.This()))
+ {
+ MethodDesc * pLoadedMD = it.Current();
+
+ if (!pLoadedMD->IsVersionable())
+ {
+ // For compatibility with the rejit APIs we ensure certain errors are detected and reported using their
+ // original HRESULTs
+ HRESULT errorHR = GetNonVersionableError(pLoadedMD);
+ if (FAILED(errorHR))
+ {
+ if (FAILED(hr = CodeVersionManager::AddCodePublishError(pModuleContainingMethodDef, methodDef, pLoadedMD, errorHR, pUnsupportedMethodErrors)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ }
+ continue;
+ }
+
+#ifdef _DEBUG
+ if (!pDomainContainingGenericDefinition->IsSharedDomain())
+ {
+ // Method is defined outside of the shared domain, so its instantiation must
+ // be defined in the AD we're iterating over (pAppDomainToSearch, which, as
+ // asserted above, must be the same domain as the generic's definition)
+ _ASSERTE(pLoadedMD->GetDomain() == pAppDomainToSearch);
+ }
+#endif // _DEBUG
+
+ MethodDesc ** ppMD = pClosedMethodDescs->Append();
+ if (ppMD == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ *ppMD = pLoadedMD;
+ }
+ return S_OK;
+}
+#endif // DACCESS_COMPILE
+
+
+//---------------------------------------------------------------------------------------
+//
+// Given the default version code for a MethodDesc that is about to be published, add
+// a jumpstamp pointing back to the prestub if the currently active version isn't
+// the default one. This is called from the PublishMethodHolder.
+//
+// Arguments:
+// * pMD - MethodDesc to jmp-stamp
+// * pCode - Top of the code that was just jitted (using original IL).
+//
+//
+// Return value:
+// * S_OK: Either we successfully did the jmp-stamp, or we didn't have to
+// * Else, HRESULT indicating failure.
+
+// Assumptions:
+// The caller has not yet published pCode to the MethodDesc, so no threads can be
+// executing inside pMD's code yet. Thus, we don't need to suspend the runtime while
+// applying the jump-stamp like we usually do for rejit requests that are made after
+// a function has been JITted.
+//
+#ifndef DACCESS_COMPILE
+HRESULT CodeVersionManager::DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ CAN_TAKE_LOCK;
+ PRECONDITION(CheckPointer(pMD));
+ PRECONDITION(pCode != NULL);
+ }
+ CONTRACTL_END;
+
+ HRESULT hr;
+
+ _ASSERTE(LockOwnedByCurrentThread());
+
+ NativeCodeVersion activeCodeVersion = GetActiveILCodeVersion(pMD).GetActiveNativeCodeVersion(pMD);
+ if (activeCodeVersion.IsDefaultVersion())
+ {
+ //Method not requested to be rejitted, nothing to do
+ return S_OK;
+ }
+
+ if (!(pMD->IsVersionable() && pMD->IsVersionableWithJumpStamp()))
+ {
+ return GetNonVersionableError(pMD);
+ }
+
+#ifndef FEATURE_JUMPSTAMP
+ _ASSERTE(!"How did we get here? IsVersionableWithJumpStamp() should have been FALSE above");
+ return S_OK;
+#else
+ MethodDescVersioningState* pVersioningState;
+ if (FAILED(hr = GetOrCreateMethodDescVersioningState(pMD, &pVersioningState)))
+ {
+ _ASSERTE(hr == E_OUTOFMEMORY);
+ return hr;
+ }
+ if (pVersioningState->GetJumpStampState() != MethodDescVersioningState::JumpStampNone)
+ {
+ //JumpStamp already in place
+ return S_OK;
+ }
+ return pVersioningState->JumpStampNativeCode(pCode);
+#endif // FEATURE_JUMPSTAMP
+
+}
+#endif // DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+//static
+void CodeVersionManager::OnAppDomainExit(AppDomain * pAppDomain)
+{
+ LIMITED_METHOD_CONTRACT;
+ // This would clean up all the allocations we have done and synchronize with any threads that might
+ // still be using the data
+ _ASSERTE(!".Net Core shouldn't be doing app domain shutdown - if we start doing so this needs to be implemented");
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Small helper to determine whether a given (possibly instantiated generic) MethodDesc
+// is safe to rejit.
+//
+// Arguments:
+// pMD - MethodDesc to test
+// Return Value:
+// S_OK iff pMD is safe to rejit
+// CORPROF_E_FUNCTION_IS_COLLECTIBLE - function can't be rejitted because it is collectible
+//
+
+// static
+#ifndef DACCESS_COMPILE
+HRESULT CodeVersionManager::GetNonVersionableError(MethodDesc* pMD)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pMD != NULL);
+
+ // Weird, non-user functions were already weeded out in RequestReJIT(), and will
+ // also never be passed to us by the prestub worker (for the pre-rejit case).
+ _ASSERTE(pMD->IsIL());
+
+ // Any MethodDescs that could be collected are not currently supported. Although we
+ // rule out all Ref.Emit modules in RequestReJIT(), there can still exist types defined
+ // in a non-reflection module and instantiated into a collectible assembly
+ // (e.g., List<MyCollectibleStruct>). In the future we may lift this
+ // restriction by updating the ReJitManager when the collectible assemblies
+ // owning the instantiations get collected.
+ if (pMD->GetLoaderAllocator()->IsCollectible())
+ {
+ return CORPROF_E_FUNCTION_IS_COLLECTIBLE;
+ }
+
+ return S_OK;
+}
+#endif
+
+//---------------------------------------------------------------------------------------
+//
+// Helper that inits a new CodePublishError and adds it to the pErrors array
+//
+// Arguments:
+// * pModule - The module in the module/MethodDef identifier pair for the method which
+// had an error during rejit
+// * methodDef - The MethodDef in the module/MethodDef identifier pair for the method which
+// had an error during rejit
+// * pMD - If available, the specific method instance which had an error during rejit
+// * hrStatus - HRESULT for the rejit error that occurred
+// * pErrors - the list of error records that this method will append to
+//
+// Return Value:
+// * S_OK: error was appended
+// * E_OUTOFMEMORY: Not enough memory to create the new error item. The array is unchanged.
+//
+
+//static
+#ifndef DACCESS_COMPILE
+HRESULT CodeVersionManager::AddCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (pErrors == NULL)
+ {
+ return S_OK;
+ }
+
+ CodePublishError* pError = pErrors->Append();
+ if (pError == NULL)
+ {
+ return E_OUTOFMEMORY;
+ }
+ pError->pModule = pModule;
+ pError->methodDef = methodDef;
+ pError->pMethodDesc = pMD;
+ pError->hrStatus = hrStatus;
+ return S_OK;
+}
+#endif
+
+#ifndef DACCESS_COMPILE
+void CodeVersionManager::ReportCodePublishError(CodePublishError* pErrorRecord)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ ReportCodePublishError(pErrorRecord->pModule, pErrorRecord->methodDef, pErrorRecord->pMethodDesc, pErrorRecord->hrStatus);
+}
+
+void CodeVersionManager::ReportCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ CAN_TAKE_LOCK;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+#ifdef FEATURE_REJIT
+ BOOL isRejitted = FALSE;
+ {
+ // named holder: an unnamed temporary would release the lock immediately
+ TableLockHolder lock(this);
+ isRejitted = !GetActiveILCodeVersion(pModule, methodDef).IsDefaultVersion();
+ }
+
+ // This isn't perfect; we might be activating a tiered jitting variation of a rejitted
+ // method, for example. If it proves to be an issue we can revisit.
+ if (isRejitted)
+ {
+ ReJitManager::ReportReJITError(pModule, methodDef, pMD, hrStatus);
+ }
+#endif
+}
+#endif // DACCESS_COMPILE
+
+//---------------------------------------------------------------------------------------
+//
+// PrepareCodeConfig::SetNativeCode() calls this to determine if there's a non-default code
+// version requested for a MethodDesc that has just been jitted for the first time.
+// This is also called when methods are being restored in NGEN images. The sequence looks like:
+// *Enter holder
+//    Enter code version manager lock
+//    DoJumpStampIfNecessary
+// *Runtime code publishes/restores method
+// *Exit holder
+//    Leave code version manager lock
+//    Send rejit error callbacks if needed
+//
+//
+// #PublishCode:
+// Note that the runtime needs to publish/restore the PCODE while this holder is
+// on the stack, so that it happens under the code version manager's lock.
+// This prevents a race with a profiler that calls
+// RequestReJIT just as the method finishes compiling. In particular, the locking ensures
+// atomicity between this set of steps (performed in DoJumpStampIfNecessary):
+// * (1) Checking whether there is a non-default version for this MD
+// * (2) If not, skip doing the jmp-stamp
+// * (3) Publishing the PCODE
+//
+// with respect to these steps performed in RequestReJIT:
+// * (a) Is PCODE published yet?
+// * (b) Create non-default ILCodeVersion which the prestub will
+// consult when it JITs the original IL
+//
+// Without this atomicity, we could get the ordering (1), (2), (a), (b), (3), resulting
+// in the rejit request getting completely ignored (i.e., we file away the new ILCodeVersion
+// AFTER the prestub checks for it).
+//
+// A similar race is possible for code being restored. In that case the restoring thread
+// does:
+// * (1) Check if there is a non-default ILCodeVersion for this MD
+// * (2) If not, no need to jmp-stamp
+// * (3) Restore the MD
+
+// And RequestRejit does:
+// * (a) [In LoadedMethodDescIterator] Is a potential MD restored yet?
+// * (b) [In EnumerateDomainClosedMethodDescs] If not, don't queue it for jump-stamping
+//
+// Same ordering (1), (2), (a), (b), (3) results in missing both opportunities to jump
+// stamp.
+
+#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+PublishMethodHolder::PublishMethodHolder(MethodDesc* pMethodDesc, PCODE pCode) :
+ m_pMD(NULL), m_hr(S_OK)
+{
+ // This method can't have a contract because entering the table lock
+ // below increments GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the incremented count to flow out of the
+ // method. The balancing decrement occurs in the destructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // We come here from the PreStub and from MethodDesc::CheckRestore
+ // The method should be effectively restored, but we haven't yet
+ // cleared the unrestored bit so we can't assert pMethodDesc->IsRestored()
+ // We can assert:
+ _ASSERTE(pMethodDesc->GetMethodTable()->IsRestored());
+
+ if (pCode != NULL)
+ {
+ m_pMD = pMethodDesc;
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ pCodeVersionManager->EnterLock();
+ m_hr = pCodeVersionManager->DoJumpStampIfNecessary(pMethodDesc, pCode);
+ }
+}
+
+
+PublishMethodHolder::~PublishMethodHolder()
+{
+ // This method can't have a contract because leaving the table lock
+ // below decrements GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the decremented count to flow out of the
+ // method. The balancing increment occurred in the constructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (m_pMD)
+ {
+ CodeVersionManager* pCodeVersionManager = m_pMD->GetCodeVersionManager();
+ pCodeVersionManager->LeaveLock();
+ if (FAILED(m_hr))
+ {
+ pCodeVersionManager->ReportCodePublishError(m_pMD->GetModule(), m_pMD->GetMemberDef(), m_pMD, m_hr);
+ }
+ }
+}
+
+PublishMethodTableHolder::PublishMethodTableHolder(MethodTable* pMethodTable) :
+ m_pMethodTable(NULL)
+{
+ // This method can't have a contract because entering the table lock
+ // below increments GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the incremented count to flow out of the
+ // method. The balancing decrement occurs in the destructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ // We come here from MethodTable::SetIsRestored
+ // The method table should be effectively restored, but we haven't yet
+ // cleared the unrestored bit so we can't assert pMethodTable->IsRestored()
+
+ m_pMethodTable = pMethodTable;
+ CodeVersionManager* pCodeVersionManager = pMethodTable->GetModule()->GetCodeVersionManager();
+ pCodeVersionManager->EnterLock();
+ MethodTable::IntroducedMethodIterator itMethods(pMethodTable, FALSE);
+ for (; itMethods.IsValid(); itMethods.Next())
+ {
+ // Although the MethodTable is restored, the methods might not be.
+ // We need to be careful to only query portions of the MethodDesc
+ // that work in a partially restored state. The only methods that need
+ // further restoration are IL stubs (which aren't rejittable) and
+ // generic methods. The only generic methods directly accessible from
+ // the MethodTable are definitions. GetNativeCode() on generic defs
+ // will run successfully and return NULL, which short circuits the
+ // rest of the logic.
+ MethodDesc * pMD = itMethods.GetMethodDesc();
+ PCODE pCode = pMD->GetNativeCode();
+ if (pCode != NULL)
+ {
+ HRESULT hr = pCodeVersionManager->DoJumpStampIfNecessary(pMD, pCode);
+ if (FAILED(hr))
+ {
+ CodeVersionManager::AddCodePublishError(pMD->GetModule(), pMD->GetMemberDef(), pMD, hr, &m_errors);
+ }
+ }
+ }
+}
+
+
+PublishMethodTableHolder::~PublishMethodTableHolder()
+{
+ // This method can't have a contract because leaving the table lock
+ // below decrements GCNoTrigger count. Contracts always revert these changes
+ // at the end of the method but we need the decremented count to flow out of the
+ // method. The balancing increment occurred in the constructor.
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
+ STATIC_CONTRACT_CAN_TAKE_LOCK;
+ STATIC_CONTRACT_MODE_ANY;
+
+ if (m_pMethodTable)
+ {
+ CodeVersionManager* pCodeVersionManager = m_pMethodTable->GetModule()->GetCodeVersionManager();
+ pCodeVersionManager->LeaveLock();
+ for (int i = 0; i < m_errors.Count(); i++)
+ {
+ pCodeVersionManager->ReportCodePublishError(&(m_errors[i]));
+ }
+ }
+}
+#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
+
+#endif // FEATURE_CODE_VERSIONING
+
diff --git a/src/vm/codeversion.h b/src/vm/codeversion.h
new file mode 100644
index 0000000000..768c9cdb55
--- /dev/null
+++ b/src/vm/codeversion.h
@@ -0,0 +1,689 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: CodeVersion.h
+//
+// ===========================================================================
+
+
+#ifndef CODE_VERSION_H
+#define CODE_VERSION_H
+
+class NativeCodeVersion;
+class ILCodeVersion;
+typedef DWORD NativeCodeVersionId;
+
+#ifdef FEATURE_CODE_VERSIONING
+class NativeCodeVersionNode;
+typedef DPTR(class NativeCodeVersionNode) PTR_NativeCodeVersionNode;
+class NativeCodeVersionCollection;
+class NativeCodeVersionIterator;
+class ILCodeVersionNode;
+typedef DPTR(class ILCodeVersionNode) PTR_ILCodeVersionNode;
+class ILCodeVersionCollection;
+class ILCodeVersionIterator;
+class MethodDescVersioningState;
+typedef DPTR(class MethodDescVersioningState) PTR_MethodDescVersioningState;
+
+class ILCodeVersioningState;
+typedef DPTR(class ILCodeVersioningState) PTR_ILCodeVersioningState;
+class CodeVersionManager;
+typedef DPTR(class CodeVersionManager) PTR_CodeVersionManager;
+
+// This HRESULT is only used as a private implementation detail. Corerror.xml has a comment in it
+// reserving this value for our use but it doesn't appear in the public headers.
+#define CORPROF_E_RUNTIME_SUSPEND_REQUIRED 0x80131381
+
+#endif
+
+
+
+
+class NativeCodeVersion
+{
+#ifdef FEATURE_CODE_VERSIONING
+ friend class MethodDescVersioningState;
+ friend class ILCodeVersion;
+#endif
+
+public:
+ NativeCodeVersion();
+ NativeCodeVersion(const NativeCodeVersion & rhs);
+#ifdef FEATURE_CODE_VERSIONING
+ NativeCodeVersion(PTR_NativeCodeVersionNode pVersionNode);
+#endif
+ NativeCodeVersion(PTR_MethodDesc pMethod);
+ BOOL IsNull() const;
+ PTR_MethodDesc GetMethodDesc() const;
+ NativeCodeVersionId GetVersionId() const;
+ BOOL IsDefaultVersion() const;
+ PCODE GetNativeCode() const;
+ ILCodeVersion GetILCodeVersion() const;
+ ReJITID GetILCodeVersionId() const;
+#ifndef DACCESS_COMPILE
+ BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected = NULL);
+#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ enum OptimizationTier
+ {
+ OptimizationTier0,
+ OptimizationTier1
+ };
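+ // Assumed meaning, per the tiered compilation design: OptimizationTier0 is the
+ // initial quickly-jitted code, OptimizationTier1 the optimized recompilation.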
+ OptimizationTier GetOptimizationTier() const;
+#ifndef DACCESS_COMPILE
+ void SetOptimizationTier(OptimizationTier tier);
+#endif
+#endif // FEATURE_TIERED_COMPILATION
+ bool operator==(const NativeCodeVersion & rhs) const;
+ bool operator!=(const NativeCodeVersion & rhs) const;
+#if defined(DACCESS_COMPILE) && defined(FEATURE_CODE_VERSIONING)
+ // The DAC is privy to the backing node abstraction
+ PTR_NativeCodeVersionNode AsNode() const;
+#endif
+
+private:
+
+#ifndef FEATURE_CODE_VERSIONING
+ MethodDesc* m_pMethodDesc;
+#else // FEATURE_CODE_VERSIONING
+
+#ifndef DACCESS_COMPILE
+ NativeCodeVersionNode* AsNode() const;
+ NativeCodeVersionNode* AsNode();
+ void SetActiveChildFlag(BOOL isActive);
+ MethodDescVersioningState* GetMethodDescVersioningState();
+#endif
+
+ BOOL IsActiveChildVersion() const;
+ PTR_MethodDescVersioningState GetMethodDescVersioningState() const;
+
+ enum StorageKind
+ {
+ Unknown,
+ Explicit,
+ Synthetic
+ };
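+ // Explicit versions are backed by an allocated NativeCodeVersionNode; the
+ // Synthetic kind represents a method's default native code version without
+ // any allocation, identified by the MethodDesc alone (see the union below).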
+
+ StorageKind m_storageKind;
+ union
+ {
+ PTR_NativeCodeVersionNode m_pVersionNode;
+ struct SyntheticStorage
+ {
+ PTR_MethodDesc m_pMethodDesc;
+ } m_synthetic;
+ };
+#endif // FEATURE_CODE_VERSIONING
+};
+
+
+
+#ifdef FEATURE_CODE_VERSIONING
+
+
+
+class ILCodeVersion
+{
+ friend class NativeCodeVersionIterator;
+
+public:
+ ILCodeVersion();
+ ILCodeVersion(const ILCodeVersion & ilCodeVersion);
+ ILCodeVersion(PTR_ILCodeVersionNode pILCodeVersionNode);
+ ILCodeVersion(PTR_Module pModule, mdMethodDef methodDef);
+
+ bool operator==(const ILCodeVersion & rhs) const;
+ bool operator!=(const ILCodeVersion & rhs) const;
+ BOOL IsNull() const;
+ BOOL IsDefaultVersion() const;
+ PTR_Module GetModule() const;
+ mdMethodDef GetMethodDef() const;
+ ReJITID GetVersionId() const;
+ NativeCodeVersionCollection GetNativeCodeVersions(PTR_MethodDesc pClosedMethodDesc) const;
+ NativeCodeVersion GetActiveNativeCodeVersion(PTR_MethodDesc pClosedMethodDesc) const;
+ PTR_COR_ILMETHOD GetIL() const;
+ PTR_COR_ILMETHOD GetILNoThrow() const;
+ DWORD GetJitFlags() const;
+ const InstrumentedILOffsetMapping* GetInstrumentedILMap() const;
+
+#ifndef DACCESS_COMPILE
+ void SetIL(COR_ILMETHOD* pIL);
+ void SetJitFlags(DWORD flags);
+ void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap);
+ HRESULT AddNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT GetOrCreateActiveNativeCodeVersion(MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT SetActiveNativeCodeVersion(NativeCodeVersion activeNativeCodeVersion, BOOL fEESuspended);
+#endif //DACCESS_COMPILE
+
+ enum RejitFlags
+ {
+ // The profiler has requested a ReJit, so we've allocated stuff, but we haven't
+ // called back to the profiler to get any info or indicate that the ReJit has
+ // started. (This info can be 'reused' for a new ReJit if the
+ // profiler calls RequestRejit again before we transition to the next state.)
+ kStateRequested = 0x00000000,
+
+ // The CLR has initiated the call to the profiler's GetReJITParameters() callback
+ // but it hasn't completed yet. At this point we have to assume the profiler has
+ // committed to a specific IL body, even if the CLR doesn't know what it is yet.
+ // If the profiler calls RequestRejit we need to allocate a new ILCodeVersion
+ // and call GetReJITParameters() again.
+ kStateGettingReJITParameters = 0x00000001,
+
+ // We have asked the profiler about this method via ICorProfilerFunctionControl,
+ // and have thus stored the IL and codegen flags the profiler specified.
+ kStateActive = 0x00000002,
+
+ kStateMask = 0x0000000F,
+ };
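+
+ // Sketch of the usual progression between the states above:
+ //   kStateRequested -> kStateGettingReJITParameters -> kStateActive
+ // with the caveats noted per state: a version still in kStateRequested can be
+ // reused by a later RequestRejit, while a request arriving after
+ // GetReJITParameters() has started needs a fresh ILCodeVersion.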
+
+ RejitFlags GetRejitState() const;
+#ifndef DACCESS_COMPILE
+ void SetRejitState(RejitFlags newState);
+#endif
+
+#ifdef DACCESS_COMPILE
+ // The DAC is privy to the backing node abstraction
+ PTR_ILCodeVersionNode AsNode() const;
+#endif
+
+private:
+
+#ifndef DACCESS_COMPILE
+ PTR_ILCodeVersionNode AsNode();
+ PTR_ILCodeVersionNode AsNode() const;
+#endif
+
+ enum StorageKind
+ {
+ Unknown,
+ Explicit,
+ Synthetic
+ };
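+ // As above for NativeCodeVersion: Explicit versions point at an allocated
+ // ILCodeVersionNode, while the Synthetic kind stands in for the default IL
+ // version and is identified only by its Module/methodDef pair.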
+
+ StorageKind m_storageKind;
+ union
+ {
+ PTR_ILCodeVersionNode m_pVersionNode;
+ struct SyntheticStorage
+ {
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+ } m_synthetic;
+ };
+};
+
+
+class NativeCodeVersionNode
+{
+ friend NativeCodeVersionIterator;
+ friend MethodDescVersioningState;
+ friend ILCodeVersionNode;
+public:
+#ifndef DACCESS_COMPILE
+ NativeCodeVersionNode(NativeCodeVersionId id, MethodDesc* pMethod, ReJITID parentId);
+#endif
+#ifdef DEBUG
+ BOOL LockOwnedByCurrentThread() const;
+#endif
+ PTR_MethodDesc GetMethodDesc() const;
+ NativeCodeVersionId GetVersionId() const;
+ PCODE GetNativeCode() const;
+ ReJITID GetILVersionId() const;
+ ILCodeVersion GetILCodeVersion() const;
+ BOOL IsActiveChildVersion() const;
+#ifndef DACCESS_COMPILE
+ BOOL SetNativeCodeInterlocked(PCODE pCode, PCODE pExpected);
+ void SetActiveChildFlag(BOOL isActive);
+#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ NativeCodeVersion::OptimizationTier GetOptimizationTier() const;
+#ifndef DACCESS_COMPILE
+ void SetOptimizationTier(NativeCodeVersion::OptimizationTier tier);
+#endif
+#endif
+
+private:
+ //union - could save a little memory?
+ //{
+ PCODE m_pNativeCode;
+ PTR_MethodDesc m_pMethodDesc;
+ //};
+
+ ReJITID m_parentId;
+ PTR_NativeCodeVersionNode m_pNextMethodDescSibling;
+ NativeCodeVersionId m_id;
+#ifdef FEATURE_TIERED_COMPILATION
+ Volatile<NativeCodeVersion::OptimizationTier> m_optTier;
+#endif
+
+ enum NativeCodeVersionNodeFlags
+ {
+ IsActiveChildFlag = 1
+ };
+ DWORD m_flags;
+};
+
+class NativeCodeVersionCollection
+{
+ friend class NativeCodeVersionIterator;
+public:
+ NativeCodeVersionCollection(PTR_MethodDesc pMethodDescFilter, ILCodeVersion ilCodeFilter);
+ NativeCodeVersionIterator Begin();
+ NativeCodeVersionIterator End();
+
+private:
+ PTR_MethodDesc m_pMethodDescFilter;
+ ILCodeVersion m_ilCodeFilter;
+};
+
+class NativeCodeVersionIterator : public Enumerator<const NativeCodeVersion, NativeCodeVersionIterator>
+{
+ friend class Enumerator<const NativeCodeVersion, NativeCodeVersionIterator>;
+
+public:
+ NativeCodeVersionIterator(NativeCodeVersionCollection* pCollection);
+ CHECK Check() const { CHECK_OK; }
+
+protected:
+ const NativeCodeVersion & Get() const;
+ void First();
+ void Next();
+ bool Equal(const NativeCodeVersionIterator &i) const;
+
+ CHECK DoCheck() const { CHECK_OK; }
+
+private:
+ enum IterationStage
+ {
+ Initial,
+ ImplicitCodeVersion,
+ LinkedList,
+ End
+ };
+ IterationStage m_stage;
+ NativeCodeVersionCollection* m_pCollection;
+ PTR_NativeCodeVersionNode m_pLinkedListCur;
+ NativeCodeVersion m_cur;
+};
+
+class ILCodeVersionNode
+{
+public:
+ ILCodeVersionNode();
+#ifndef DACCESS_COMPILE
+ ILCodeVersionNode(Module* pModule, mdMethodDef methodDef, ReJITID id);
+#endif
+#ifdef DEBUG
+ BOOL LockOwnedByCurrentThread() const;
+#endif //DEBUG
+ PTR_Module GetModule() const;
+ mdMethodDef GetMethodDef() const;
+ ReJITID GetVersionId() const;
+ PTR_COR_ILMETHOD GetIL() const;
+ DWORD GetJitFlags() const;
+ const InstrumentedILOffsetMapping* GetInstrumentedILMap() const;
+ ILCodeVersion::RejitFlags GetRejitState() const;
+ PTR_ILCodeVersionNode GetNextILVersionNode() const;
+#ifndef DACCESS_COMPILE
+ void SetIL(COR_ILMETHOD* pIL);
+ void SetJitFlags(DWORD flags);
+ void SetInstrumentedILMap(SIZE_T cMap, COR_IL_MAP * rgMap);
+ void SetRejitState(ILCodeVersion::RejitFlags newState);
+ void SetNextILVersionNode(ILCodeVersionNode* pNextVersionNode);
+#endif
+
+private:
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+ ReJITID m_rejitId;
+ PTR_ILCodeVersionNode m_pNextILVersionNode;
+ Volatile<ILCodeVersion::RejitFlags> m_rejitState;
+ VolatilePtr<COR_ILMETHOD> m_pIL;
+ Volatile<DWORD> m_jitFlags;
+ InstrumentedILOffsetMapping m_instrumentedILMap;
+};
+
+class ILCodeVersionCollection
+{
+ friend class ILCodeVersionIterator;
+
+public:
+ ILCodeVersionCollection(PTR_Module pModule, mdMethodDef methodDef);
+ ILCodeVersionIterator Begin();
+ ILCodeVersionIterator End();
+
+private:
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+};
+
+class ILCodeVersionIterator : public Enumerator<const ILCodeVersion, ILCodeVersionIterator>
+{
+ friend class Enumerator<const ILCodeVersion, ILCodeVersionIterator>;
+
+public:
+ ILCodeVersionIterator();
+ ILCodeVersionIterator(const ILCodeVersionIterator & iter);
+ ILCodeVersionIterator(ILCodeVersionCollection* pCollection);
+ CHECK Check() const { CHECK_OK; }
+
+protected:
+ const ILCodeVersion & Get() const;
+ void First();
+ void Next();
+ bool Equal(const ILCodeVersionIterator &i) const;
+
+ CHECK DoCheck() const { CHECK_OK; }
+
+private:
+ enum IterationStage
+ {
+ Initial,
+ ImplicitCodeVersion,
+ LinkedList,
+ End
+ };
+ IterationStage m_stage;
+ ILCodeVersion m_cur;
+ PTR_ILCodeVersionNode m_pLinkedListCur;
+ ILCodeVersionCollection* m_pCollection;
+};
+
+class MethodDescVersioningState
+{
+public:
+ // The size of the code used to jump stamp the prolog
+#ifdef FEATURE_JUMPSTAMP
+ static const size_t JumpStubSize =
+#if defined(_X86_) || defined(_AMD64_)
+ 5;
+#else
+#error "Need to define size of jump-stamp for this platform"
+#endif
+#endif // FEATURE_JUMPSTAMP
+
+ MethodDescVersioningState(PTR_MethodDesc pMethodDesc);
+ PTR_MethodDesc GetMethodDesc() const;
+ NativeCodeVersionId AllocateVersionId();
+ PTR_NativeCodeVersionNode GetFirstVersionNode() const;
+
+#ifndef DACCESS_COMPILE
+#ifdef FEATURE_JUMPSTAMP
+ HRESULT SyncJumpStamp(NativeCodeVersion nativeCodeVersion, BOOL fEESuspended);
+ HRESULT UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode);
+ HRESULT UndoJumpStampNativeCode(BOOL fEESuspended);
+ HRESULT JumpStampNativeCode(PCODE pCode = NULL);
+#endif // FEATURE_JUMPSTAMP
+ void LinkNativeCodeVersionNode(NativeCodeVersionNode* pNativeCodeVersionNode);
+#endif // DACCESS_COMPILE
+
+#ifdef FEATURE_JUMPSTAMP
+ enum JumpStampFlags
+ {
+ // There is no jump stamp in place on this method (Either because
+ // there is no code at all, or there is code that hasn't been
+ // overwritten with a jump)
+ JumpStampNone = 0x0,
+
+ // The method code has the jump stamp written in, and it points to the Prestub
+ JumpStampToPrestub = 0x1,
+
+ // The method code has the jump stamp written in, and it points to the currently
+ // active code version
+ JumpStampToActiveVersion = 0x2,
+ };
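+
+ // Expected transitions (a sketch inferred from the states above): JumpStampNone
+ // becomes JumpStampToPrestub once jitted code must be redirected for a
+ // non-default version, and the stamp then moves between JumpStampToPrestub and
+ // JumpStampToActiveVersion as active native code is published or undone
+ // (see SyncJumpStamp/UpdateJumpTarget/UndoJumpStampNativeCode).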
+
+ JumpStampFlags GetJumpStampState();
+ void SetJumpStampState(JumpStampFlags newState);
+#endif // FEATURE_JUMPSTAMP
+
+ //read-write data for the default native code version
+ BOOL IsDefaultVersionActiveChild() const;
+#ifndef DACCESS_COMPILE
+ void SetDefaultVersionActiveChildFlag(BOOL isActive);
+#endif
+
+private:
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_JUMPSTAMP)
+ INDEBUG(BOOL CodeIsSaved();)
+ HRESULT UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64NewValue, BOOL fContentionPossible);
+#endif
+ PTR_MethodDesc m_pMethodDesc;
+
+ enum MethodDescVersioningStateFlags
+ {
+ JumpStampMask = 0x3,
+ IsDefaultVersionActiveChildFlag = 0x4
+ };
+ BYTE m_flags;
+ NativeCodeVersionId m_nextId;
+ PTR_NativeCodeVersionNode m_pFirstVersionNode;
+
+
+ // The originally JITted code that was overwritten with the jmp stamp.
+#ifdef FEATURE_JUMPSTAMP
+ BYTE m_rgSavedCode[JumpStubSize];
+#endif
+};
+
+class MethodDescVersioningStateHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<PTR_MethodDescVersioningState>>
+{
+public:
+ typedef typename DefaultSHashTraits<PTR_MethodDescVersioningState>::element_t element_t;
+ typedef typename DefaultSHashTraits<PTR_MethodDescVersioningState>::count_t count_t;
+
+ typedef const PTR_MethodDesc key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e->GetMethodDesc();
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)(size_t)dac_cast<TADDR>(k);
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_MethodDescVersioningState>(nullptr); }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; }
+};
+
+typedef SHash<MethodDescVersioningStateHashTraits> MethodDescVersioningStateHash;
+
+class ILCodeVersioningState
+{
+public:
+ ILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef);
+ ILCodeVersion GetActiveVersion() const;
+ PTR_ILCodeVersionNode GetFirstVersionNode() const;
+#ifndef DACCESS_COMPILE
+ void SetActiveVersion(ILCodeVersion ilActiveCodeVersion);
+ void LinkILCodeVersionNode(ILCodeVersionNode* pILCodeVersionNode);
+#endif
+
+ struct Key
+ {
+ public:
+ Key();
+ Key(PTR_Module pModule, mdMethodDef methodDef);
+ size_t Hash() const;
+ bool operator==(const Key & rhs) const;
+ private:
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+ };
+
+ Key GetKey() const;
+
+private:
+ ILCodeVersion m_activeVersion;
+ PTR_ILCodeVersionNode m_pFirstVersionNode;
+ PTR_Module m_pModule;
+ mdMethodDef m_methodDef;
+};
+
+class ILCodeVersioningStateHashTraits : public NoRemoveSHashTraits<DefaultSHashTraits<PTR_ILCodeVersioningState>>
+{
+public:
+ typedef typename DefaultSHashTraits<PTR_ILCodeVersioningState>::element_t element_t;
+ typedef typename DefaultSHashTraits<PTR_ILCodeVersioningState>::count_t count_t;
+
+ typedef const ILCodeVersioningState::Key key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e->GetKey();
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1 == k2;
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (count_t)k.Hash();
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return dac_cast<PTR_ILCodeVersioningState>(nullptr); }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; }
+};
+
+typedef SHash<ILCodeVersioningStateHashTraits> ILCodeVersioningStateHash;
+
+
+class CodeVersionManager
+{
+ friend class ILCodeVersion;
+ friend class PublishMethodHolder;
+ friend class PublishMethodTableHolder;
+
+public:
+ CodeVersionManager();
+
+ void PreInit(BOOL fSharedDomain);
+
+ class TableLockHolder : public CrstHolder
+ {
+ public:
+ TableLockHolder(CodeVersionManager * pCodeVersionManager);
+ };
+ //Using the holder is preferable, but in some cases the holder can't be used
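+ // e.g. a minimal sketch:
+ //   {
+ //       CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ //       // ... read or update the version tables while the lock is held ...
+ //   } // lock released by the holder's destructor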
+#ifndef DACCESS_COMPILE
+ void EnterLock();
+ void LeaveLock();
+#endif
+
+#ifdef DEBUG
+ BOOL LockOwnedByCurrentThread() const;
+#endif
+
+ DWORD GetNonDefaultILVersionCount();
+ ILCodeVersionCollection GetILCodeVersions(PTR_MethodDesc pMethod);
+ ILCodeVersionCollection GetILCodeVersions(PTR_Module pModule, mdMethodDef methodDef);
+ ILCodeVersion GetActiveILCodeVersion(PTR_MethodDesc pMethod);
+ ILCodeVersion GetActiveILCodeVersion(PTR_Module pModule, mdMethodDef methodDef);
+ ILCodeVersion GetILCodeVersion(PTR_MethodDesc pMethod, ReJITID rejitId);
+ NativeCodeVersionCollection GetNativeCodeVersions(PTR_MethodDesc pMethod) const;
+ NativeCodeVersion GetNativeCodeVersion(PTR_MethodDesc pMethod, PCODE codeStartAddress) const;
+ PTR_ILCodeVersioningState GetILCodeVersioningState(PTR_Module pModule, mdMethodDef methodDef) const;
+ PTR_MethodDescVersioningState GetMethodDescVersioningState(PTR_MethodDesc pMethod) const;
+
+#ifndef DACCESS_COMPILE
+ struct CodePublishError
+ {
+ Module* pModule;
+ mdMethodDef methodDef;
+ MethodDesc* pMethodDesc;
+ HRESULT hrStatus;
+ };
+
+ HRESULT AddILCodeVersion(Module* pModule, mdMethodDef methodDef, ReJITID rejitId, ILCodeVersion* pILCodeVersion);
+ HRESULT AddNativeCodeVersion(ILCodeVersion ilCodeVersion, MethodDesc* pClosedMethodDesc, NativeCodeVersion* pNativeCodeVersion);
+ HRESULT DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode);
+ PCODE PublishVersionableCodeIfNecessary(MethodDesc* pMethodDesc, BOOL fCanBackpatchPrestub);
+ HRESULT PublishNativeCodeVersion(MethodDesc* pMethodDesc, NativeCodeVersion nativeCodeVersion, BOOL fEESuspended);
+ HRESULT GetOrCreateMethodDescVersioningState(MethodDesc* pMethod, MethodDescVersioningState** ppMethodDescVersioningState);
+ HRESULT GetOrCreateILCodeVersioningState(Module* pModule, mdMethodDef methodDef, ILCodeVersioningState** ppILCodeVersioningState);
+ HRESULT SetActiveILCodeVersions(ILCodeVersion* pActiveVersions, DWORD cActiveVersions, BOOL fEESuspended, CDynArray<CodePublishError> * pPublishErrors);
+ static HRESULT AddCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors);
+ static HRESULT AddCodePublishError(NativeCodeVersion nativeCodeVersion, HRESULT hrStatus, CDynArray<CodePublishError> * pErrors);
+ static void OnAppDomainExit(AppDomain* pAppDomain);
+#endif
+
+private:
+
+#ifndef DACCESS_COMPILE
+ static HRESULT EnumerateClosedMethodDescs(MethodDesc* pMD, CDynArray<MethodDesc*> * pClosedMethodDescs, CDynArray<CodePublishError> * pUnsupportedMethodErrors);
+ static HRESULT EnumerateDomainClosedMethodDescs(
+ AppDomain * pAppDomainToSearch,
+ Module* pModuleContainingMethodDef,
+ mdMethodDef methodDef,
+ CDynArray<MethodDesc*> * pClosedMethodDescs,
+ CDynArray<CodePublishError> * pUnsupportedMethodErrors);
+ static HRESULT GetNonVersionableError(MethodDesc* pMD);
+ void ReportCodePublishError(CodePublishError* pErrorRecord);
+ void ReportCodePublishError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus);
+#endif
+
+ //Module,MethodDef -> ILCodeVersioningState
+ ILCodeVersioningStateHash m_ilCodeVersioningStateMap;
+
+ //closed MethodDesc -> MethodDescVersioningState
+ MethodDescVersioningStateHash m_methodDescVersioningStateMap;
+
+ CrstExplicitInit m_crstTable;
+};
+
+#endif // FEATURE_CODE_VERSIONING
+
+//
+// These holders are used by runtime code that is making new code
+// available for execution, either by publishing jitted code
+// or restoring NGEN code. It ensures the publishing is synchronized
+// with rejit requests
+//
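+// A sketch of the intended pattern (the publish step itself varies by caller):
+//
+//   {
+//       PublishMethodHolder publishWorker(pMD, pCode);
+//       // ... caller publishes pCode to pMD here, while the holder keeps the
+//       // code version manager lock held (see #PublishCode in codeversion.cpp) ...
+//   } // the destructor leaves the lock and reports any rejit errors
+//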
+class PublishMethodHolder
+{
+public:
+#if !defined(FEATURE_CODE_VERSIONING) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
+ PublishMethodHolder(MethodDesc* pMethod, PCODE pCode) { }
+#else
+ PublishMethodHolder(MethodDesc* pMethod, PCODE pCode);
+ ~PublishMethodHolder();
+#endif
+
+private:
+#if defined(FEATURE_CODE_VERSIONING)
+ MethodDesc * m_pMD;
+ HRESULT m_hr;
+#endif
+};
+
+class PublishMethodTableHolder
+{
+public:
+#if !defined(FEATURE_CODE_VERSIONING) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
+ PublishMethodTableHolder(MethodTable* pMethodTable) { }
+#else
+ PublishMethodTableHolder(MethodTable* pMethodTable);
+ ~PublishMethodTableHolder();
+#endif
+
+private:
+#if defined(FEATURE_CODE_VERSIONING) && !defined(DACCESS_COMPILE)
+ MethodTable* m_pMethodTable;
+ CDynArray<CodeVersionManager::CodePublishError> m_errors;
+#endif
+};
+
+#endif // CODE_VERSION_H
diff --git a/src/vm/comcallablewrapper.cpp b/src/vm/comcallablewrapper.cpp
index fdb0e54a45..322bcdc268 100644
--- a/src/vm/comcallablewrapper.cpp
+++ b/src/vm/comcallablewrapper.cpp
@@ -37,7 +37,6 @@
#include "dispex.h"
#include "perfcounters.h"
#include "guidfromname.h"
-#include "security.h"
#include "comconnectionpoints.h"
#include <objsafe.h> // IID_IObjctSafe
#include "virtualcallstub.h"
@@ -3274,9 +3273,7 @@ inline IUnknown * ComCallWrapper::GetComIPFromCCW_VisibilityCheck(
}
CONTRACT_END;
- // Ensure that the interface we are passing out was defined in trusted code.
- if ((!(flags & GetComIPFromCCW::SuppressSecurityCheck) && pIntfComMT->IsDefinedInUntrustedCode()) ||
- // Do a visibility check if needed.
+ if (// Do a visibility check if needed.
((flags & GetComIPFromCCW::CheckVisibility) && (!pIntfComMT->IsComVisible())))
{
// If not, fail to return the interface.
@@ -3698,10 +3695,8 @@ IUnknown* ComCallWrapper::GetComIPFromCCW(ComCallWrapper *pWrap, REFIID riid, Me
ComMethodTable * pIntfComMT = ComMethodTable::ComMethodTableFromIP(pIntf);
// Manual inlining of GetComIPFromCCW_VisibilityCheck() for common case.
- if (// Ensure that the interface we are passing out was defined in trusted code.
- (!(flags & GetComIPFromCCW::SuppressSecurityCheck) && pIntfComMT->IsDefinedInUntrustedCode())
- // Do a visibility check if needed.
- || ((flags & GetComIPFromCCW::CheckVisibility) && (!pIntfComMT->IsComVisible())))
+ if (// Do a visibility check if needed.
+ ((flags & GetComIPFromCCW::CheckVisibility) && (!pIntfComMT->IsComVisible())))
{
// If not, fail to return the interface.
SafeRelease(pIntf);
@@ -5452,12 +5447,6 @@ ComMethodTable* ComCallWrapperTemplate::GetClassComMT()
MethodTable *pMT = m_thClass.GetMethodTable();
- // Preload the policy for these classes before we take the lock.
- for (MethodTable* pMethodTable = pMT; pMethodTable != NULL; pMethodTable = pMethodTable->GetParentMethodTable())
- {
- Security::CanCallUnmanagedCode(pMethodTable->GetModule());
- }
-
// We haven't set it up yet, generate one.
ComMethodTable* pClassComMT;
if (pMT->IsDelegate() && (pMT->IsProjectedFromWinRT() || WinRTTypeNameConverter::IsRedirectedType(pMT)))
@@ -5887,12 +5876,6 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForClass(MethodTable
if (IsTypeVisibleFromCom(TypeHandle(pComMT->m_pMT)))
pComMT->m_Flags |= enum_ComVisible;
- if (!Security::CanCallUnmanagedCode(pComMT->m_pMT->GetModule()))
- {
- pComMT->m_Flags |= enum_IsUntrusted;
- }
-
-
#if _DEBUG
{
// In debug set all the vtable slots to 0xDEADCA11.
@@ -5974,11 +5957,6 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForInterface(MethodT
if (pItfClass->GetClass()->IsComClassInterface())
pComMT->m_Flags |= enum_ComClassItf;
- if (!Security::CanCallUnmanagedCode(pComMT->m_pMT->GetModule()))
- {
- pComMT->m_Flags |= enum_IsUntrusted;
- }
-
#ifdef _DEBUG
{
// In debug set all the vtable slots to 0xDEADCA11.
@@ -6064,11 +6042,6 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForBasic(MethodTable
if (pMT->GetClass()->IsComClassInterface())
pComMT->m_Flags |= enum_ComClassItf;
- if (!Security::CanCallUnmanagedCode(pMT->GetModule()))
- {
- pComMT->m_Flags |= enum_IsUntrusted;
- }
-
#ifdef MDA_SUPPORTED
#ifdef _DEBUG
{
@@ -6150,11 +6123,6 @@ ComMethodTable* ComCallWrapperTemplate::CreateComMethodTableForDelegate(MethodTa
pComMT->m_Flags |= enum_GuidGenerated;
- if (!Security::CanCallUnmanagedCode(pComMT->m_pMT->GetModule()))
- {
- pComMT->m_Flags |= enum_IsUntrusted;
- }
-
#if _DEBUG
{
// In debug set all the vtable slots to 0xDEADCA11.
@@ -6287,11 +6255,6 @@ ComCallWrapperTemplate* ComCallWrapperTemplate::CreateTemplate(TypeHandle thClas
// Preload the policy for this interface
CCWInterfaceMapIterator it(thClass, pClsFact, true);
- while (it.Next())
- {
- Module *pModule = it.GetInterface()->GetModule();
- Security::CanCallUnmanagedCode(pModule);
- }
// Num interfaces in the template.
unsigned numInterfaces = it.GetCount();
diff --git a/src/vm/comcallablewrapper.h b/src/vm/comcallablewrapper.h
index 85647279f3..1a68135e77 100644
--- a/src/vm/comcallablewrapper.h
+++ b/src/vm/comcallablewrapper.h
@@ -572,7 +572,7 @@ enum Masks
enum_SigClassLoadChecked = 0x00000100,
enum_ComClassItf = 0x00000200,
enum_GuidGenerated = 0x00000400,
- enum_IsUntrusted = 0x00001000,
+ // enum_unused = 0x00001000,
enum_IsBasic = 0x00002000,
enum_IsWinRTDelegate = 0x00004000,
enum_IsWinRTTrivialAggregate = 0x00008000,
@@ -646,12 +646,6 @@ struct ComMethodTable
return (CorClassIfaceAttr)(m_Flags & enum_ClassInterfaceTypeMask);
}
- BOOL IsDefinedInUntrustedCode()
- {
- LIMITED_METHOD_CONTRACT;
- return (m_Flags & enum_IsUntrusted) ? TRUE : FALSE;
- }
-
BOOL IsIClassX()
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/comdelegate.cpp b/src/vm/comdelegate.cpp
index cee0d8c08a..961a758750 100644
--- a/src/vm/comdelegate.cpp
+++ b/src/vm/comdelegate.cpp
@@ -22,7 +22,6 @@
#include "mdaassistants.h"
#include "cgensys.h"
#include "asmconstants.h"
-#include "security.h"
#include "virtualcallstub.h"
#include "callingconvention.h"
#include "customattribute.h"
@@ -933,30 +932,6 @@ void COMDelegate::BindToMethod(DELEGATEREF *pRefThis,
pExactMethodType,
pTargetMethod->IsStatic() ? NULL : pInstanceMT,
pTargetMethod);
-
- // Ask for skip verification if a delegate over a .ctor or .cctor is requested.
- if (pTargetMethod->IsClassConstructorOrCtor())
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
-
-#ifdef FEATURE_COMINTEROP
- // Check if it's a COM object and if so, demand unmanaged code permission.
- // <TODO> I think we need a target check here. Investigate. </TODO>
- if (pTargetMethod && pTargetMethod->GetMethodTable()->IsComObjectType())
- Security::SpecialDemand(SSWT_DEMAND_FROM_NATIVE, SECURITY_UNMANAGED_CODE);
-#endif // FEATURE_COMINTEROP
-
- // Devdiv bug 296229: dangerous methods are those that make security decisions based on
- // the result of stack walks. When a delegate to such a method is invoked asynchronously
- // the stackwalker will stop at the remoting code and consider the caller unmanaged code.
- // Unmanaged code is allowed to bypass any security check.
- if (InvokeUtil::IsDangerousMethod(pTargetMethod))
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, REFLECTION_MEMBER_ACCESS);
-
- // Check whether the creator of the delegate lives in the same assembly as the target method. If not, and they aren't fully
- // trusted, we have to make this delegate a secure wrapper and allocate a new inner delegate to represent the real target.
- MethodDesc *pCreatorMethod = sCtx.GetCallerMethod();
- if (NeedsSecureDelegate(pCreatorMethod, sCtx.GetCallerDomain(), pTargetMethod))
- refRealDelegate = CreateSecureDelegate(*pRefThis, pCreatorMethod, pTargetMethod);
}
// If we didn't wrap the real delegate in a secure delegate then the real delegate is the one passed in.
@@ -1511,8 +1486,7 @@ OBJECTREF COMDelegate::ConvertToDelegate(LPVOID pCallback, MethodTable* pMT)
{
GCX_PREEMP();
- DWORD dwStubFlags = pMT->ClassRequiresUnmanagedCodeCheck() ? NDIRECTSTUB_FL_HASDECLARATIVESECURITY : 0;
- pMarshalStub = GetStubForInteropMethod(pMD, dwStubFlags, &(pClass->m_pForwardStubMD));
+ pMarshalStub = GetStubForInteropMethod(pMD, 0, &(pClass->m_pForwardStubMD));
// Save this new stub on the DelegateEEClass.
EnsureWritablePages(dac_cast<PVOID>(&pClass->m_pMarshalStub), sizeof(PCODE));
@@ -1633,9 +1607,6 @@ OBJECTREF COMDelegate::ConvertWinRTInterfaceToDelegate(IUnknown *pIdentity, Meth
DWORD dwStubFlags = NDIRECTSTUB_FL_COM | NDIRECTSTUB_FL_WINRT | NDIRECTSTUB_FL_WINRTDELEGATE;
- if (pMT->ClassRequiresUnmanagedCodeCheck())
- dwStubFlags |= NDIRECTSTUB_FL_HASDECLARATIVESECURITY;
-
pMarshalStub = GetStubForInteropMethod(pMD, dwStubFlags);
// At this point we must have a non-NULL ComPlusCallInfo
@@ -1737,9 +1708,6 @@ MethodDesc* COMDelegate::GetILStubMethodDesc(EEImplMethodDesc* pDelegateMD, DWOR
dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;
}
- if (pMT->ClassRequiresUnmanagedCodeCheck())
- dwStubFlags |= NDIRECTSTUB_FL_HASDECLARATIVESECURITY;
-
PInvokeStaticSigInfo sigInfo(pDelegateMD);
return NDirect::CreateCLRToNativeILStub(&sigInfo, dwStubFlags, pDelegateMD);
}
@@ -1832,8 +1800,6 @@ FCIMPL3(PCODE, COMDelegate::AdjustTarget, Object* refThisUNSAFE, Object* targetU
#ifdef FEATURE_COMINTEROP
isComObject = pMTTarg->IsComObjectType();
- if (isComObject)
- DoUnmanagedCodeAccessCheck(pMeth);
#endif // FEATURE_COMINTEROP
if (!pMT->IsTransparentProxy())
@@ -1971,18 +1937,7 @@ FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* tar
methodArgCount++; // count 'this'
}
- // do we need a secure delegate?
-
- // Devdiv bug 296229: dangerous methods are those that make security decisions based on
- // the result of stack walks. When a delegate to such a method is invoked asynchronously
- // the stackwalker will stop at the remoting code and consider the caller unmanaged code.
- // Unmanaged code is allowed to bypass any security check.
- if (InvokeUtil::IsDangerousMethod(pMeth))
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, REFLECTION_MEMBER_ACCESS);
-
- if (NeedsSecureDelegate(pCreatorMethod, GetAppDomain(), pMeth))
- gc.refThis = CreateSecureDelegate(gc.refThis, pCreatorMethod, pMeth);
- else if (NeedsWrapperDelegate(pMeth))
+ if (NeedsWrapperDelegate(pMeth))
gc.refThis = CreateSecureDelegate(gc.refThis, NULL, pMeth);
if (pMeth->GetLoaderAllocator()->IsCollectible())
@@ -2033,8 +1988,6 @@ FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* tar
BOOL isComObject = false;
#ifdef FEATURE_COMINTEROP
isComObject = pMTTarg->IsComObjectType();
- if (isComObject)
- DoUnmanagedCodeAccessCheck(pMeth);
#endif // FEATURE_COMINTEROP
if (!pMTTarg->IsTransparentProxy())
@@ -2125,56 +2078,6 @@ FCIMPL3(void, COMDelegate::DelegateConstruct, Object* refThisUNSAFE, Object* tar
}
FCIMPLEND
-
-#ifdef FEATURE_COMINTEROP
-void COMDelegate::DoUnmanagedCodeAccessCheck(MethodDesc* pMeth)
-{
- // Skip if SuppressUnmanagedCodePermission is present
- if (pMeth->RequiresLinktimeCheck())
- {
- // Check whether this is actually a SuppressUnmanagedCodePermission attribute and
- // if so, don't do a demand
- {
- return;
- }
- }
-
- // If this method is defined directly on an interface, get that interface
- // Otherwise, from the class get the interface that this method is defined on.
- // Based on this interface, skip the check if the interface is DispatchOnly or
- // if the interface is defined in fully-trusted code.
- if (pMeth->IsComPlusCall())
- {
- ComPlusCallMethodDesc *pCMD = (ComPlusCallMethodDesc *)pMeth;
- MethodTable* pMTItf = (pCMD->m_pComPlusCallInfo == NULL ? NULL : pCMD->m_pComPlusCallInfo->m_pInterfaceMT);
-
- // If the interface methodtable is null, then the ComPlusCallMethodDesc hasn't been set up yet.
- if (pMTItf == NULL)
- {
- GCX_PREEMP();
- pMeth->DoPrestub(NULL);
- pMTItf = ((ComPlusCallMethodDesc*)pMeth)->m_pComPlusCallInfo->m_pInterfaceMT;
- }
- else
- {
- pMTItf->CheckRestore();
- }
-
- if (pMTItf->GetComInterfaceType() == ifDispatch)
- {
- return;
- }
- else if (Security::CanCallUnmanagedCode(pMTItf->GetModule()))
- {
- return;
- }
- }
-
- Security::SpecialDemand(SSWT_DEMAND_FROM_NATIVE, SECURITY_UNMANAGED_CODE);
-}
-#endif // FEATURE_COMINTEROP
-
-
MethodDesc *COMDelegate::GetMethodDesc(OBJECTREF orDelegate)
{
CONTRACTL
@@ -2463,20 +2366,6 @@ FCIMPLEND
#endif // CROSSGEN_COMPILE
-
-BOOL COMDelegate::NeedsSecureDelegate(MethodDesc* pCreatorMethod, AppDomain *pCreatorDomain, MethodDesc* pTargetMD)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- return FALSE;
-}
-
BOOL COMDelegate::NeedsWrapperDelegate(MethodDesc* pTargetMD)
{
LIMITED_METHOD_CONTRACT;
@@ -3422,19 +3311,13 @@ MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pT
if (!isStatic)
methodArgCount++; // count 'this'
MethodDesc *pCallerMethod = (MethodDesc*)pCtorData->pMethod;
- BOOL needsSecureDelegate = NeedsSecureDelegate(pCallerMethod, GetAppDomain(), pTargetMethod);
- if (!needsSecureDelegate && NeedsWrapperDelegate(pTargetMethod))
+ if (NeedsWrapperDelegate(pTargetMethod))
{
    // If we need a wrapper even though it is not a secure delegate, go through the slow path
return NULL;
}
- // If this is a secure delegate case, and the secure delegate would have a pointer to a collectible
- // method in it, then use the slow path. This could be optimized with a set of fast paths.
- if (needsSecureDelegate && (pCallerMethod->IsLCGMethod() || pCallerMethod->GetLoaderAllocator()->IsCollectible()))
- return NULL;
-
    // Force the slow path for nullable so that we can give the user an error in case the verifier is not run.
MethodTable* pMT = pTargetMethod->GetMethodTable();
if (!pTargetMethod->IsStatic() && Nullable::IsNullableType(pMT))
@@ -3486,10 +3369,6 @@ MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pT
// Another is to pass a gchandle to the delegate ctor. This is fastest, but only works if we can predict the gc handle at this time.
// We will use this for the non secure variants
- // Collectible secure delegates can go down the slow path
- if (isCollectible && needsSecureDelegate)
- return NULL;
-
if (invokeArgCount == methodArgCount)
{
// case 2, 3, 6
@@ -3501,9 +3380,7 @@ MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pT
if (!isStatic && pTargetMethod->IsVirtual() && !pTargetMethod->GetMethodTable()->IsValueType())
{
// case 3
- if (needsSecureDelegate)
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_VIRTUAL_DISPATCH);
- else if (isCollectible)
+ if (isCollectible)
pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_COLLECTIBLE_VIRTUAL_DISPATCH);
else
pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_VIRTUAL_DISPATCH);
@@ -3511,9 +3388,7 @@ MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pT
else
{
// case 2, 6
- if (needsSecureDelegate)
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_OPENED);
- else if (isCollectible)
+ if (isCollectible)
pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_COLLECTIBLE_OPENED);
else
pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_OPENED);
@@ -3527,13 +3402,7 @@ MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pT
if (!pShuffleThunk)
pShuffleThunk = SetupShuffleThunk(pDelMT, pTargetMethod);
pCtorData->pArg3 = (void*)pShuffleThunk->GetEntryPoint();
- if (needsSecureDelegate)
- {
- // need to fill the info for the secure delegate
- pCtorData->pArg4 = (void *)GetSecureInvoke(pDelegateInvoke);
- pCtorData->pArg5 = pCallerMethod;
- }
- else if (isCollectible)
+ if (isCollectible)
{
pCtorData->pArg4 = pTargetMethodLoaderAllocator->GetLoaderAllocatorObjectHandle();
}
@@ -3557,41 +3426,22 @@ MethodDesc* COMDelegate::GetDelegateCtor(TypeHandle delegateType, MethodDesc *pT
(pTargetMethod->IsInterface() ||
(pTargetMethod->GetMethodTable()->IsValueType() && !pTargetMethod->IsUnboxingStub()));
- if (needsSecureDelegate)
- {
- if (needsRuntimeInfo)
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_RT_CLOSED);
- else
- {
- if (!isStatic)
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_CLOSED);
- else
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_SECURE_CLOSED_STATIC);
- }
-
- // need to fill the info for the secure delegate
- pCtorData->pArg3 = (void *)GetSecureInvoke(pDelegateInvoke);
- pCtorData->pArg4 = pCallerMethod;
- }
+ if (needsRuntimeInfo)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_RT_CLOSED);
else
{
- if (needsRuntimeInfo)
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_RT_CLOSED);
+ if (!isStatic)
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_CLOSED);
else
{
- if (!isStatic)
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_CLOSED);
+ if (isCollectible)
+ {
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_COLLECTIBLE_CLOSED_STATIC);
+ pCtorData->pArg3 = pTargetMethodLoaderAllocator->GetLoaderAllocatorObjectHandle();
+ }
else
{
- if (isCollectible)
- {
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_COLLECTIBLE_CLOSED_STATIC);
- pCtorData->pArg3 = pTargetMethodLoaderAllocator->GetLoaderAllocatorObjectHandle();
- }
- else
- {
- pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_CLOSED_STATIC);
- }
+ pRealCtor = MscorlibBinder::GetMethod(METHOD__MULTICAST_DELEGATE__CTOR_CLOSED_STATIC);
}
}
}
diff --git a/src/vm/comdelegate.h b/src/vm/comdelegate.h
index 5630cf9a75..f6ca775b60 100644
--- a/src/vm/comdelegate.h
+++ b/src/vm/comdelegate.h
@@ -71,8 +71,6 @@ public:
static PCODE GetSecureInvoke(MethodDesc* pMD);
    // determines whether the delegate needs to be wrapped for non-security reasons
static BOOL NeedsWrapperDelegate(MethodDesc* pTargetMD);
- // determines whether the delegate needs to be wrapped
- static BOOL NeedsSecureDelegate(MethodDesc* pCreatorMethod, AppDomain *pCreatorDomain, MethodDesc* pTargetMD);
// on entry delegate points to the delegate to wrap
static DELEGATEREF CreateSecureDelegate(DELEGATEREF delegate, MethodDesc* pCreatorMethod, MethodDesc* pTargetMD);
@@ -122,10 +120,6 @@ public:
static Stub *GenerateStubForHost(MethodDesc *pInvokeMD, MethodDesc *pStubMD, LPVOID pNativeTarget, Stub *pInnerStub);
#endif // _TARGET_X86_
-#ifdef FEATURE_COMINTEROP
- static void DoUnmanagedCodeAccessCheck(MethodDesc* pMeth);
-#endif // FEATURE_COMINTEROP
-
static MethodDesc * __fastcall GetMethodDesc(OBJECTREF obj);
static OBJECTREF GetTargetObject(OBJECTREF obj);
diff --git a/src/vm/commodule.cpp b/src/vm/commodule.cpp
index cb14967295..45b31bfe68 100644
--- a/src/vm/commodule.cpp
+++ b/src/vm/commodule.cpp
@@ -9,7 +9,6 @@
#include "reflectclasswriter.h"
#include "class.h"
#include "corpolicy.h"
-#include "security.h"
#include "ceesectionstring.h"
#include <cor.h>
#include "typeparse.h"
diff --git a/src/vm/compile.cpp b/src/vm/compile.cpp
index abfe07e6f4..b3d187c555 100644
--- a/src/vm/compile.cpp
+++ b/src/vm/compile.cpp
@@ -22,7 +22,6 @@
#include "compile.h"
#include "excep.h"
#include "field.h"
-#include "security.h"
#include "eeconfig.h"
#include "zapsig.h"
#include "gcrefmap.h"
diff --git a/src/vm/comsynchronizable.cpp b/src/vm/comsynchronizable.cpp
index 01ba49651b..8fce346142 100644
--- a/src/vm/comsynchronizable.cpp
+++ b/src/vm/comsynchronizable.cpp
@@ -20,7 +20,6 @@
#include "excep.h"
#include "vars.hpp"
#include "field.h"
-#include "security.h"
#include "comsynchronizable.h"
#include "dbginterface.h"
#include "comdelegate.h"
@@ -29,6 +28,10 @@
#include "appdomain.hpp"
#include "appdomain.inl"
+#ifndef FEATURE_PAL
+#include "utilcode.h"
+#endif
+
#include "newapis.h"
// To include definition of CAPTURE_BUCKETS_AT_TRANSITION
@@ -1543,9 +1546,18 @@ void QCALLTYPE ThreadNative::InformThreadNameChange(QCall::ThreadHandle thread,
QCALL_CONTRACT;
BEGIN_QCALL;
-
+
Thread* pThread = &(*thread);
+#ifndef FEATURE_PAL
+    // On Windows 10 Creators Update and later, set the unmanaged thread name as well. It shows up in ETW
+    // traces and debuggers, which is very helpful as more and more threads get a meaningful name.
+ if (len > 0 && name != NULL)
+ {
+ SetThreadName(pThread->GetThreadHandle(), name);
+ }
+#endif
+
#ifdef PROFILING_SUPPORTED
{
BEGIN_PIN_PROFILER(CORProfilerTrackThreads());
@@ -1612,22 +1624,41 @@ FCIMPL1(FC_BOOL_RET, ThreadNative::IsThreadpoolThread, ThreadBaseObject* thread)
}
FCIMPLEND
+INT32 QCALLTYPE ThreadNative::GetOptimalMaxSpinWaitsPerSpinIteration()
+{
+ QCALL_CONTRACT;
+
+ INT32 optimalMaxNormalizedYieldsPerSpinIteration;
+
+ BEGIN_QCALL;
+
+ Thread::EnsureYieldProcessorNormalizedInitialized();
+ optimalMaxNormalizedYieldsPerSpinIteration = Thread::GetOptimalMaxNormalizedYieldsPerSpinIteration();
+
+ END_QCALL;
+
+ return optimalMaxNormalizedYieldsPerSpinIteration;
+}
FCIMPL1(void, ThreadNative::SpinWait, int iterations)
{
FCALL_CONTRACT;
+ if (iterations <= 0)
+ {
+ return;
+ }
+
//
// If we're not going to spin for long, it's ok to remain in cooperative mode.
// The threshold is determined by the cost of entering preemptive mode; if we're
// spinning for less than that number of cycles, then switching to preemptive
- // mode won't help a GC start any faster. That number is right around 1000000
- // on my machine.
+ // mode won't help a GC start any faster.
//
- if (iterations <= 1000000)
+ if (iterations <= 100000 && Thread::IsYieldProcessorNormalizedInitialized())
{
- for(int i = 0; i < iterations; i++)
- YieldProcessor();
+ for (int i = 0; i < iterations; i++)
+ Thread::YieldProcessorNormalized();
return;
}
@@ -1637,8 +1668,9 @@ FCIMPL1(void, ThreadNative::SpinWait, int iterations)
HELPER_METHOD_FRAME_BEGIN_NOPOLL();
GCX_PREEMP();
- for(int i = 0; i < iterations; i++)
- YieldProcessor();
+ Thread::EnsureYieldProcessorNormalizedInitialized();
+ for (int i = 0; i < iterations; i++)
+ Thread::YieldProcessorNormalized();
HELPER_METHOD_FRAME_END();
}
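
[Editor's note] The hunks above swap raw YieldProcessor() loops for Thread::YieldProcessorNormalized(), which scales the iteration count to the measured cost of a pause instruction (newer CPUs pause far longer per instruction). A minimal sketch of the normalization idea, not the actual thread.cpp implementation; MeasurePauseCost and kTargetNsPerYield are hypothetical names:

    #include <chrono>
    #include <immintrin.h> // _mm_pause; assumes x86/x64

    static unsigned g_pausesPerNormalizedYield = 1;

    static void MeasurePauseCost()
    {
        using namespace std::chrono;
        const int samples = 100000;
        auto start = high_resolution_clock::now();
        for (int i = 0; i < samples; i++)
            _mm_pause();
        double nsPerPause =
            (double)duration_cast<nanoseconds>(high_resolution_clock::now() - start).count() / samples;
        const double kTargetNsPerYield = 37.0; // assumed baseline cost of one normalized yield
        unsigned n = (unsigned)(kTargetNsPerYield / nsPerPause);
        g_pausesPerNormalizedYield = n < 1 ? 1 : n;
    }

    static void YieldProcessorNormalizedSketch()
    {
        // one "normalized" yield costs roughly the same wall time on any CPU
        for (unsigned i = 0; i < g_pausesPerNormalizedYield; i++)
            _mm_pause();
    }
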
diff --git a/src/vm/comsynchronizable.h b/src/vm/comsynchronizable.h
index 00b055c960..b280c605b8 100644
--- a/src/vm/comsynchronizable.h
+++ b/src/vm/comsynchronizable.h
@@ -97,6 +97,7 @@ public:
UINT64 QCALLTYPE GetProcessDefaultStackSize();
static FCDECL1(INT32, GetManagedThreadId, ThreadBaseObject* th);
+ static INT32 QCALLTYPE GetOptimalMaxSpinWaitsPerSpinIteration();
static FCDECL1(void, SpinWait, int iterations);
static BOOL QCALLTYPE YieldThread();
static FCDECL0(Object*, GetCurrentThread);
diff --git a/src/vm/comthreadpool.cpp b/src/vm/comthreadpool.cpp
index a9fad74cee..c49f83400c 100644
--- a/src/vm/comthreadpool.cpp
+++ b/src/vm/comthreadpool.cpp
@@ -23,7 +23,6 @@
#include "object.h"
#include "field.h"
#include "excep.h"
-#include "security.h"
#include "eeconfig.h"
#include "corhost.h"
#include "nativeoverlapped.h"
diff --git a/src/vm/comtoclrcall.cpp b/src/vm/comtoclrcall.cpp
index b6d59a859f..11f522431d 100644
--- a/src/vm/comtoclrcall.cpp
+++ b/src/vm/comtoclrcall.cpp
@@ -28,7 +28,6 @@
#include "siginfo.hpp"
#include "comcallablewrapper.h"
#include "field.h"
-#include "security.h"
#include "virtualcallstub.h"
#include "dllimport.h"
#include "mlinfo.h"
@@ -425,45 +424,6 @@ void COMToCLRInvokeTarget(PCODE pManagedTarget, OBJECTREF pObject, ComCallMethod
InvokeStub(pCMD, pManagedTarget, pObject, pFrame, pThread, pRetValOut);
}
-bool COMToCLRWorkerBody_SecurityCheck(ComCallMethodDesc * pCMD, MethodDesc * pMD, Thread * pThread, UINT64 * pRetValOut)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_TRIGGERS;
- MODE_COOPERATIVE;
- SO_TOLERANT;
- }
- CONTRACTL_END;
-
- bool result = true;
-
- BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, { *pRetValOut = COR_E_STACKOVERFLOW; return false; } );
-
- EX_TRY
- {
-
- // Need to check for the presence of a security link demand on the target
- // method. If we're hosted inside of an app domain with security, we perform
- // the link demand against that app domain's grant set.
- Security::CheckLinkDemandAgainstAppDomain(pMD);
-
- if (pCMD->IsEarlyBoundUnsafe())
- COMPlusThrow(kSecurityException);
-
- }
- EX_CATCH
- {
- *pRetValOut = SetupErrorInfo(GET_THROWABLE());
- result = false;
- }
- EX_END_CATCH(SwallowAllExceptions);
-
- END_SO_INTOLERANT_CODE;
-
- return result;
-}
-
NOINLINE
void COMToCLRWorkerBody_Rare(Thread * pThread, ComMethodFrame * pFrame, ComCallWrapper * pWrap,
MethodDesc * pRealMD, ComCallMethodDesc * pCMD, DWORD maskedFlags,
@@ -482,17 +442,12 @@ void COMToCLRWorkerBody_Rare(Thread * pThread, ComMethodFrame * pFrame, ComCallW
OBJECTREF pObject;
int fpReturnSize = 0;
- if (maskedFlags & enum_NeedsSecurityCheck)
- {
- if (!COMToCLRWorkerBody_SecurityCheck(pCMD, pRealMD, pThread, pRetValOut))
- return;
- }
if (maskedFlags & enum_NativeR8Retval)
fpReturnSize = 8;
if (maskedFlags & enum_NativeR4Retval)
fpReturnSize = 4;
- maskedFlags &= ~(enum_NeedsSecurityCheck|enum_NativeR4Retval|enum_NativeR8Retval);
+ maskedFlags &= ~(enum_NativeR4Retval|enum_NativeR8Retval);
CONSISTENCY_CHECK(maskedFlags != ( enum_IsWinRTCtor|enum_IsVirtual));
CONSISTENCY_CHECK(maskedFlags != (enum_IsDelegateInvoke|enum_IsWinRTCtor|enum_IsVirtual));
@@ -573,7 +528,6 @@ void COMToCLRWorkerBody(
OBJECTREF pObject;
DWORD mask = (
- enum_NeedsSecurityCheck |
enum_IsDelegateInvoke |
enum_IsWinRTCtor |
enum_IsVirtual |
@@ -1104,11 +1058,6 @@ static void FieldCallWorkerBody(Thread *pThread, ComMethodFrame* pFrame)
}
#endif // PROFILING_SUPPORTED
- if (pCMD->IsEarlyBoundUnsafe())
- {
- COMPlusThrow(kSecurityException);
- }
-
UINT64 retVal;
InvokeStub(pCMD, NULL, pWrap->GetObjectRef(), pFrame, pThread, &retVal);
@@ -1338,20 +1287,6 @@ void ComCallMethodDesc::InitMethod(MethodDesc *pMD, MethodDesc *pInterfaceMD, BO
{
            // Initialize the native type information (size of native stack, native retval flags, etc.).
InitNativeInfo();
-
- // If this interface method is implemented on a class which lives
- // in an assembly without UnmanagedCodePermission, then
- // we mark the ComCallMethodDesc as unsafe for being called early-bound.
- Module* pModule = pMD->GetModule();
- if (!Security::CanCallUnmanagedCode(pModule))
- {
- m_flags |= (enum_NeedsSecurityCheck | enum_IsEarlyBoundUnsafe);
- }
- else if (pMD->RequiresLinktimeCheck())
- {
- // remember that we have to call Security::CheckLinkDemandAgainstAppDomain at invocation time
- m_flags |= enum_NeedsSecurityCheck;
- }
}
if (pMD->IsEEImpl() && COMDelegate::IsDelegateInvokeMethod(pMD))
@@ -1384,15 +1319,6 @@ void ComCallMethodDesc::InitField(FieldDesc* pFD, BOOL isGetter)
{
            // Initialize the native type information (size of native stack, native retval flags, etc.).
InitNativeInfo();
-
- // If this interface method is implemented on a class which lives
- // in an assembly without UnmanagedCodePermission, then
- // we mark the ComCallMethodDesc as unsafe for being called early-bound.
- Module* pModule = pFD->GetModule();
- if (!Security::CanCallUnmanagedCode(pModule))
- {
- m_flags |= enum_IsEarlyBoundUnsafe;
- }
}
};
diff --git a/src/vm/comtoclrcall.h b/src/vm/comtoclrcall.h
index 145aaadbd7..d2f3891993 100644
--- a/src/vm/comtoclrcall.h
+++ b/src/vm/comtoclrcall.h
@@ -29,10 +29,10 @@ enum ComCallFlags
enum_NativeHResultRetVal = 0x0040, // Native ret val is an HRESULT
enum_NativeBoolRetVal = 0x0080, // Native ret val is 0 in the case of failure
enum_NativeVoidRetVal = 0x0100, // Native ret val is void
- enum_IsEarlyBoundUnsafe = 0x0200, // Is unsafe to be called early-bound
+ // unused = 0x0200,
enum_HasMarshalError = 0x0400, // The signature is not marshalable and m_StackBytes is a guess
enum_IsDelegateInvoke = 0x0800, // The method is an 'Invoke' on a delegate
- enum_NeedsSecurityCheck = 0x1000, // Security check is needed at every invocation
+ // unused = 0x1000,
enum_IsWinRTCall = 0x2000, // The method is declared on a WinRT interface/delegate
enum_IsWinRTCtor = 0x4000, // The method is a WinRT constructor
enum_IsWinRTStatic = 0x8000, // The method is a WinRT static
@@ -119,18 +119,6 @@ public:
return (m_flags & enum_IsFieldCall);
}
- BOOL IsEarlyBoundUnsafe()
- {
- LIMITED_METHOD_CONTRACT;
- return (m_flags & enum_IsEarlyBoundUnsafe);
- }
-
- BOOL NeedsSecurityCheck()
- {
- LIMITED_METHOD_CONTRACT;
- return (m_flags & enum_NeedsSecurityCheck);
- }
-
BOOL IsMethodCall()
{
WRAPPER_NO_CONTRACT;
diff --git a/src/vm/comutilnative.cpp b/src/vm/comutilnative.cpp
index b75f684992..766336ee7f 100644
--- a/src/vm/comutilnative.cpp
+++ b/src/vm/comutilnative.cpp
@@ -2624,14 +2624,146 @@ FCIMPL6(INT32, ManagedLoggingHelper::GetRegistryLoggingValues, CLR_BOOL* bLoggin
}
FCIMPLEND
-// Return true if the valuetype does not contain pointer and is tightly packed
+static BOOL HasOverriddenMethod(MethodTable* mt, MethodTable* classMT, WORD methodSlot)
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ SO_TOLERANT;
+ } CONTRACTL_END;
+
+ _ASSERTE(mt != NULL);
+ _ASSERTE(classMT != NULL);
+ _ASSERTE(methodSlot != 0);
+
+ PCODE actual = mt->GetRestoredSlot(methodSlot);
+ PCODE base = classMT->GetRestoredSlot(methodSlot);
+
+ if (actual == base)
+ {
+ return FALSE;
+ }
+
+ if (!classMT->IsZapped())
+ {
+        // If mscorlib is JITed, the slots can be patched, so we need to compare the actual MethodDescs
+        // to detect a match reliably
+ if (MethodTable::GetMethodDescForSlotAddress(actual) == MethodTable::GetMethodDescForSlotAddress(base))
+ {
+ return FALSE;
+ }
+ }
+
+ return TRUE;
+}
+
+static BOOL CanCompareBitsOrUseFastGetHashCode(MethodTable* mt)
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_COOPERATIVE;
+ } CONTRACTL_END;
+
+ _ASSERTE(mt != NULL);
+
+ if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
+ {
+ return mt->CanCompareBitsOrUseFastGetHashCode();
+ }
+
+ if (mt->ContainsPointers()
+ || mt->IsNotTightlyPacked())
+ {
+ mt->SetHasCheckedCanCompareBitsOrUseFastGetHashCode();
+ return FALSE;
+ }
+
+ MethodTable* valueTypeMT = MscorlibBinder::GetClass(CLASS__VALUE_TYPE);
+ WORD slotEquals = MscorlibBinder::GetMethod(METHOD__VALUE_TYPE__EQUALS)->GetSlot();
+ WORD slotGetHashCode = MscorlibBinder::GetMethod(METHOD__VALUE_TYPE__GET_HASH_CODE)->GetSlot();
+
+ // Check the input type.
+ if (HasOverriddenMethod(mt, valueTypeMT, slotEquals)
+ || HasOverriddenMethod(mt, valueTypeMT, slotGetHashCode))
+ {
+ mt->SetHasCheckedCanCompareBitsOrUseFastGetHashCode();
+
+        // If an overridden Equals or GetHashCode is found, stop searching further.
+ return FALSE;
+ }
+
+ BOOL canCompareBitsOrUseFastGetHashCode = TRUE;
+
+ // The type itself did not override Equals or GetHashCode, go for its fields.
+ ApproxFieldDescIterator iter = ApproxFieldDescIterator(mt, ApproxFieldDescIterator::INSTANCE_FIELDS);
+ for (FieldDesc* pField = iter.Next(); pField != NULL; pField = iter.Next())
+ {
+ if (pField->GetFieldType() == ELEMENT_TYPE_VALUETYPE)
+ {
+ // Check current field type.
+ MethodTable* fieldMethodTable = pField->GetApproxFieldTypeHandleThrowing().GetMethodTable();
+ if (!CanCompareBitsOrUseFastGetHashCode(fieldMethodTable))
+ {
+ canCompareBitsOrUseFastGetHashCode = FALSE;
+ break;
+ }
+ }
+ else if (pField->GetFieldType() == ELEMENT_TYPE_R8
+ || pField->GetFieldType() == ELEMENT_TYPE_R4)
+ {
+            // We have a double/single field; cannot compare in the fast path.
+ canCompareBitsOrUseFastGetHashCode = FALSE;
+ break;
+ }
+ }
+
+ // We've gone through all instance fields. It's time to cache the result.
+    // Note that SetCanCompareBitsOrUseFastGetHashCode(BOOL) sets the checked flag
+    // and the canCompare flag atomically to avoid races.
+ mt->SetCanCompareBitsOrUseFastGetHashCode(canCompareBitsOrUseFastGetHashCode);
+
+ return canCompareBitsOrUseFastGetHashCode;
+}
+
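
[Editor's note] The comment above leans on the checked flag and the answer being published together. A hedged, standalone sketch of that flag-caching pattern (not the MethodTable implementation); CachedAnswer is a hypothetical stand-in:

    #include <atomic>

    struct CachedAnswer
    {
        static const unsigned kChecked = 1u << 0;
        static const unsigned kResult  = 1u << 1;
        std::atomic<unsigned> bits{0};

        bool TryGet(bool& result) const
        {
            unsigned v = bits.load(std::memory_order_acquire);
            if (!(v & kChecked))
                return false;
            result = (v & kResult) != 0;
            return true;
        }

        void Set(bool result)
        {
            // a single atomic store publishes both flags, so a reader can never
            // observe "checked" without the matching answer
            bits.store(kChecked | (result ? kResult : 0u), std::memory_order_release);
        }
    };
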
+NOINLINE static FC_BOOL_RET CanCompareBitsHelper(MethodTable* mt, OBJECTREF objRef)
+{
+ FC_INNER_PROLOG(ValueTypeHelper::CanCompareBits);
+
+ _ASSERTE(mt != NULL);
+ _ASSERTE(objRef != NULL);
+
+ BOOL ret = FALSE;
+
+ HELPER_METHOD_FRAME_BEGIN_RET_ATTRIB_1(Frame::FRAME_ATTR_EXACT_DEPTH|Frame::FRAME_ATTR_CAPTURE_DEPTH_2, objRef);
+
+ ret = CanCompareBitsOrUseFastGetHashCode(mt);
+
+ HELPER_METHOD_FRAME_END();
+ FC_INNER_EPILOG();
+
+ FC_RETURN_BOOL(ret);
+}
+
+// Return true if the valuetype does not contain pointers, is tightly packed,
+// does not have floating point fields, and does not override the Equals method.
FCIMPL1(FC_BOOL_RET, ValueTypeHelper::CanCompareBits, Object* obj)
{
FCALL_CONTRACT;
_ASSERTE(obj != NULL);
MethodTable* mt = obj->GetMethodTable();
- FC_RETURN_BOOL(!mt->ContainsPointers() && !mt->IsNotTightlyPacked());
+
+ if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
+ {
+ FC_RETURN_BOOL(mt->CanCompareBitsOrUseFastGetHashCode());
+ }
+
+ OBJECTREF objRef(obj);
+
+ FC_INNER_RETURN(FC_BOOL_RET, CanCompareBitsHelper(mt, objRef));
}
FCIMPLEND
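
[Editor's note] Standalone demo of why the R4/R8 check above disables the bitwise fast path: +0.0 and -0.0 compare equal yet differ in their sign bit, so memcmp-style equality diverges from value equality (NaNs break it in the other direction):

    #include <cstdio>
    #include <cstring>

    struct Pair { double x; double y; };

    int main()
    {
        Pair a = { +0.0, 1.0 };
        Pair b = { -0.0, 1.0 };
        bool bitsEqual  = memcmp(&a, &b, sizeof(Pair)) == 0; // false: sign bits differ
        bool valueEqual = (a.x == b.x) && (a.y == b.y);      // true: +0.0 == -0.0
        printf("bits: %d, values: %d\n", (int)bitsEqual, (int)valueEqual);
        return 0;
    }
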
@@ -2650,12 +2782,6 @@ FCIMPL2(FC_BOOL_RET, ValueTypeHelper::FastEqualsCheck, Object* obj1, Object* obj
}
FCIMPLEND
-static BOOL CanUseFastGetHashCodeHelper(MethodTable *mt)
-{
- LIMITED_METHOD_CONTRACT;
- return !mt->ContainsPointers() && !mt->IsNotTightlyPacked();
-}
-
static INT32 FastGetValueTypeHashCodeHelper(MethodTable *mt, void *pObjRef)
{
CONTRACTL
@@ -2664,7 +2790,6 @@ static INT32 FastGetValueTypeHashCodeHelper(MethodTable *mt, void *pObjRef)
GC_NOTRIGGER;
MODE_COOPERATIVE;
SO_TOLERANT;
- PRECONDITION(CanUseFastGetHashCodeHelper(mt));
} CONTRACTL_END;
INT32 hashCode = 0;
@@ -2690,9 +2815,19 @@ static INT32 RegularGetValueTypeHashCode(MethodTable *mt, void *pObjRef)
INT32 hashCode = 0;
INT32 *pObj = (INT32*)pObjRef;
+ BOOL canUseFastGetHashCodeHelper = FALSE;
+ if (mt->HasCheckedCanCompareBitsOrUseFastGetHashCode())
+ {
+ canUseFastGetHashCodeHelper = mt->CanCompareBitsOrUseFastGetHashCode();
+ }
+ else
+ {
+ canUseFastGetHashCodeHelper = CanCompareBitsOrUseFastGetHashCode(mt);
+ }
+
    // While we shouldn't get here directly from ValueTypeHelper::GetHashCode, if we recurse we need to
// be able to handle getting the hashcode for an embedded structure whose hashcode is computed by the fast path.
- if (CanUseFastGetHashCodeHelper(mt))
+ if (canUseFastGetHashCodeHelper)
{
return FastGetValueTypeHashCodeHelper(mt, pObjRef);
}
@@ -2797,17 +2932,29 @@ FCIMPL1(INT32, ValueTypeHelper::GetHashCode, Object* objUNSAFE)
// we munge the class index with two big prime numbers
hashCode = typeID * 711650207 + 2506965631U;
- if (CanUseFastGetHashCodeHelper(pMT))
+ BOOL canUseFastGetHashCodeHelper = FALSE;
+ if (pMT->HasCheckedCanCompareBitsOrUseFastGetHashCode())
+ {
+ canUseFastGetHashCodeHelper = pMT->CanCompareBitsOrUseFastGetHashCode();
+ }
+ else
+ {
+ HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
+ canUseFastGetHashCodeHelper = CanCompareBitsOrUseFastGetHashCode(pMT);
+ HELPER_METHOD_FRAME_END();
+ }
+
+ if (canUseFastGetHashCodeHelper)
{
hashCode ^= FastGetValueTypeHashCodeHelper(pMT, obj->UnBox());
}
else
{
- HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
+ HELPER_METHOD_FRAME_BEGIN_RET_1(obj);
hashCode ^= RegularGetValueTypeHashCode(pMT, obj->UnBox());
HELPER_METHOD_FRAME_END();
}
-
+
return hashCode;
}
FCIMPLEND
@@ -2850,14 +2997,11 @@ COMNlsHashProvider::COMNlsHashProvider()
{
LIMITED_METHOD_CONTRACT;
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- bUseRandomHashing = FALSE;
pEntropy = NULL;
pDefaultSeed = NULL;
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
}
-INT32 COMNlsHashProvider::HashString(LPCWSTR szStr, SIZE_T strLen, BOOL forceRandomHashing, INT64 additionalEntropy)
+INT32 COMNlsHashProvider::HashString(LPCWSTR szStr, SIZE_T strLen)
{
CONTRACTL {
THROWS;
@@ -2866,40 +3010,15 @@ INT32 COMNlsHashProvider::HashString(LPCWSTR szStr, SIZE_T strLen, BOOL forceRan
}
CONTRACTL_END;
-#ifndef FEATURE_RANDOMIZED_STRING_HASHING
- _ASSERTE(forceRandomHashing == false);
- _ASSERTE(additionalEntropy == 0);
-#endif
-
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- if(bUseRandomHashing || forceRandomHashing)
- {
- int marvinResult[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
-
- if(additionalEntropy == 0)
- {
- SymCryptMarvin32(GetDefaultSeed(), (PCBYTE) szStr, strLen * sizeof(WCHAR), (PBYTE) &marvinResult);
- }
- else
- {
- SYMCRYPT_MARVIN32_EXPANDED_SEED seed;
- CreateMarvin32Seed(additionalEntropy, &seed);
- SymCryptMarvin32(&seed, (PCBYTE) szStr, strLen * sizeof(WCHAR), (PBYTE) &marvinResult);
- }
+ int marvinResult[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
+
+ SymCryptMarvin32(GetDefaultSeed(), (PCBYTE) szStr, strLen * sizeof(WCHAR), (PBYTE) &marvinResult);
- return marvinResult[0] ^ marvinResult[1];
- }
- else
- {
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
- return ::HashString(szStr);
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- }
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
+ return marvinResult[0] ^ marvinResult[1];
}
-INT32 COMNlsHashProvider::HashSortKey(PCBYTE pSrc, SIZE_T cbSrc, BOOL forceRandomHashing, INT64 additionalEntropy)
+INT32 COMNlsHashProvider::HashSortKey(PCBYTE pSrc, SIZE_T cbSrc)
{
CONTRACTL {
THROWS;
@@ -2908,141 +3027,15 @@ INT32 COMNlsHashProvider::HashSortKey(PCBYTE pSrc, SIZE_T cbSrc, BOOL forceRando
}
CONTRACTL_END;
-#ifndef FEATURE_RANDOMIZED_STRING_HASHING
- _ASSERTE(forceRandomHashing == false);
- _ASSERTE(additionalEntropy == 0);
-#endif
-
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- if(bUseRandomHashing || forceRandomHashing)
- {
- int marvinResult[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
-
- // Sort Keys are terminated with a null byte which we didn't hash using the old algorithm,
- // so we don't have it with Marvin32 either.
- if(additionalEntropy == 0)
- {
- SymCryptMarvin32(GetDefaultSeed(), pSrc, cbSrc - 1, (PBYTE) &marvinResult);
- }
- else
- {
- SYMCRYPT_MARVIN32_EXPANDED_SEED seed;
- CreateMarvin32Seed(additionalEntropy, &seed);
- SymCryptMarvin32(&seed, pSrc, cbSrc - 1, (PBYTE) &marvinResult);
- }
-
- return marvinResult[0] ^ marvinResult[1];
- }
- else
- {
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
- // Ok, lets build the hashcode -- mostly lifted from GetHashCode() in String.cs, for strings.
- int hash1 = 5381;
- int hash2 = hash1;
- const BYTE *pB = pSrc;
- BYTE c;
-
- while (pB != 0 && *pB != 0) {
- hash1 = ((hash1 << 5) + hash1) ^ *pB;
- c = pB[1];
-
- //
- // FUTURE: Update NewAPis::LCMapStringEx to perhaps use a different, bug free, Win32 API on Win2k3 to workaround the issue discussed below.
- //
- // On Win2k3 Server, LCMapStringEx(LCMAP_SORTKEY) output does not correspond to CompareString in all cases, breaking the .NET GetHashCode<->Equality Contract
- // Due to a fluke in our GetHashCode method, we avoided this issue due to the break out of the loop on the binary-zero byte.
- //
- if (c == 0)
- break;
-
- hash2 = ((hash2 << 5) + hash2) ^ c;
- pB += 2;
- }
-
- return hash1 + (hash2 * 1566083941);
-
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- }
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
-
-}
-
-INT32 COMNlsHashProvider::HashiStringKnownLower80(LPCWSTR szStr, INT32 strLen, BOOL forceRandomHashing, INT64 additionalEntropy)
-{
- CONTRACTL {
- THROWS;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
-#ifndef FEATURE_RANDOMIZED_STRING_HASHING
- _ASSERTE(forceRandomHashing == false);
- _ASSERTE(additionalEntropy == 0);
-#endif
-
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- if(bUseRandomHashing || forceRandomHashing)
- {
- WCHAR buf[SYMCRYPT_MARVIN32_INPUT_BLOCK_SIZE * 8];
- SYMCRYPT_MARVIN32_STATE marvinState;
- SYMCRYPT_MARVIN32_EXPANDED_SEED seed;
-
- if(additionalEntropy == 0)
- {
- SymCryptMarvin32Init(&marvinState, GetDefaultSeed());
- }
- else
- {
- CreateMarvin32Seed(additionalEntropy, &seed);
- SymCryptMarvin32Init(&marvinState, &seed);
- }
-
- LPCWSTR szEnd = szStr + strLen;
-
- const UINT A_TO_Z_RANGE = (UINT)('z' - 'a');
-
- while (szStr != szEnd)
- {
- size_t count = (sizeof(buf) / sizeof(buf[0]));
-
- if ((size_t)(szEnd - szStr) < count)
- count = (size_t)(szEnd - szStr);
-
- for (size_t i = 0; i<count; i++)
- {
- WCHAR c = szStr[i];
-
- if ((UINT)(c - 'a') <= A_TO_Z_RANGE) // if (c >='a' && c <= 'z')
- {
- //If we have a lowercase character, ANDing off 0x20
- // will make it an uppercase character.
- c &= ~0x20;
- }
-
- buf[i] = c;
- }
-
- szStr += count;
-
- SymCryptMarvin32Append(&marvinState, (PCBYTE) &buf, sizeof(WCHAR) * count);
- }
+ int marvinResult[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
+
+    // Sort keys are terminated with a null byte which the old algorithm didn't hash,
+    // so we don't hash it with Marvin32 either.
+ SymCryptMarvin32(GetDefaultSeed(), pSrc, cbSrc - 1, (PBYTE) &marvinResult);
- int marvinResult[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
- SymCryptMarvin32Result(&marvinState, (PBYTE) &marvinResult);
- return marvinResult[0] ^ marvinResult[1];
- }
- else
- {
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
- return ::HashiStringKnownLower80(szStr);
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- }
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
+ return marvinResult[0] ^ marvinResult[1];
}
-
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
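
[Editor's note] With the feature flag gone, Marvin32 is the only string-hash path. A minimal sketch of that path, mirroring the SymCrypt calls in the hunks above (assumes marvin32.h and the Windows typedefs are in scope; HashUtf16Buffer is a hypothetical helper):

    INT32 HashUtf16Buffer(PCSYMCRYPT_MARVIN32_EXPANDED_SEED seed, LPCWSTR sz, SIZE_T cch)
    {
        int words[SYMCRYPT_MARVIN32_RESULT_SIZE / sizeof(int)];
        SymCryptMarvin32(seed, (PCBYTE)sz, cch * sizeof(WCHAR), (PBYTE)&words);
        // fold the 64-bit Marvin32 result into the 32-bit hash the runtime expects
        return words[0] ^ words[1];
    }
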
void COMNlsHashProvider::InitializeDefaultSeed()
{
CONTRACTL {
@@ -3110,27 +3103,8 @@ PCBYTE COMNlsHashProvider::GetEntropy()
return (PCBYTE) pEntropy;
}
-
-void COMNlsHashProvider::CreateMarvin32Seed(INT64 additionalEntropy, PSYMCRYPT_MARVIN32_EXPANDED_SEED pExpandedMarvinSeed)
-{
- CONTRACTL {
- THROWS;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- INT64 *pEntropy = (INT64*) GetEntropy();
- INT64 entropy;
-
- entropy = *pEntropy ^ additionalEntropy;
-
- SymCryptMarvin32ExpandSeed(pExpandedMarvinSeed, (PCBYTE) &entropy, SYMCRYPT_MARVIN32_SEED_SIZE);
-}
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
-
#ifdef FEATURE_COREFX_GLOBALIZATION
-INT32 QCALLTYPE CoreFxGlobalization::HashSortKey(PCBYTE pSortKey, INT32 cbSortKey, BOOL forceRandomizedHashing, INT64 additionalEntropy)
+INT32 QCALLTYPE CoreFxGlobalization::HashSortKey(PCBYTE pSortKey, INT32 cbSortKey)
{
QCALL_CONTRACT;
@@ -3138,7 +3112,7 @@ INT32 QCALLTYPE CoreFxGlobalization::HashSortKey(PCBYTE pSortKey, INT32 cbSortKe
BEGIN_QCALL;
- retVal = COMNlsHashProvider::s_NlsHashProvider.HashSortKey(pSortKey, cbSortKey, forceRandomizedHashing, additionalEntropy);
+ retVal = COMNlsHashProvider::s_NlsHashProvider.HashSortKey(pSortKey, cbSortKey);
END_QCALL;
diff --git a/src/vm/comutilnative.h b/src/vm/comutilnative.h
index 41df265e91..831e1c071e 100644
--- a/src/vm/comutilnative.h
+++ b/src/vm/comutilnative.h
@@ -27,7 +27,6 @@
#undef GetCurrentTime
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
#pragma warning(push)
#pragma warning(disable:4324)
#if !defined(CROSS_COMPILE) && defined(_TARGET_ARM_) && !defined(PLATFORM_UNIX)
@@ -35,7 +34,6 @@
#endif
#include "marvin32.h"
#pragma warning(pop)
-#endif
//
//
@@ -260,33 +258,24 @@ class COMNlsHashProvider {
public:
COMNlsHashProvider();
- INT32 HashString(LPCWSTR szStr, SIZE_T strLen, BOOL forceRandomHashing, INT64 additionalEntropy);
- INT32 HashSortKey(PCBYTE pSrc, SIZE_T cbSrc, BOOL forceRandomHashing, INT64 additionalEntropy);
- INT32 HashiStringKnownLower80(LPCWSTR lpszStr, INT32 strLen, BOOL forceRandomHashing, INT64 additionalEntropy);
+ INT32 HashString(LPCWSTR szStr, SIZE_T strLen);
+ INT32 HashSortKey(PCBYTE pSrc, SIZE_T cbSrc);
static COMNlsHashProvider s_NlsHashProvider;
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- void SetUseRandomHashing(BOOL useRandomHashing) { LIMITED_METHOD_CONTRACT; bUseRandomHashing = useRandomHashing; }
- BOOL GetUseRandomHashing() { LIMITED_METHOD_CONTRACT; return bUseRandomHashing; }
-
-
private:
- BOOL bUseRandomHashing;
PBYTE pEntropy;
PCSYMCRYPT_MARVIN32_EXPANDED_SEED pDefaultSeed;
PCBYTE GetEntropy();
PCSYMCRYPT_MARVIN32_EXPANDED_SEED GetDefaultSeed();
void InitializeDefaultSeed();
- void CreateMarvin32Seed(INT64 additionalEntropy, PSYMCRYPT_MARVIN32_EXPANDED_SEED pExpandedMarvinSeed);
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
};
#ifdef FEATURE_COREFX_GLOBALIZATION
class CoreFxGlobalization {
public:
- static INT32 QCALLTYPE HashSortKey(PCBYTE pSortKey, INT32 cbSortKey, BOOL forceRandomizedHashing, INT64 additionalEntropy);
+ static INT32 QCALLTYPE HashSortKey(PCBYTE pSortKey, INT32 cbSortKey);
};
#endif // FEATURE_COREFX_GLOBALIZATION
diff --git a/src/vm/crossgen/CMakeLists.txt b/src/vm/crossgen/CMakeLists.txt
index 805e932dda..8c706885b8 100644
--- a/src/vm/crossgen/CMakeLists.txt
+++ b/src/vm/crossgen/CMakeLists.txt
@@ -36,6 +36,7 @@ set(VM_CROSSGEN_SOURCES
../generics.cpp
../genmeth.cpp
../hash.cpp
+ ../ilinstrumentation.cpp
../ilmarshalers.cpp
../ilstubcache.cpp
../ilstubresolver.cpp
@@ -46,7 +47,6 @@ set(VM_CROSSGEN_SOURCES
../contractimpl.cpp
../jitinterface.cpp
../loaderallocator.cpp
- ../listlock.cpp
../memberload.cpp
../method.cpp
../methodimpl.cpp
diff --git a/src/vm/crossgen_mscorlib/CMakeLists.txt b/src/vm/crossgen_mscorlib/CMakeLists.txt
index 598ee9952c..12fdf9064f 100644
--- a/src/vm/crossgen_mscorlib/CMakeLists.txt
+++ b/src/vm/crossgen_mscorlib/CMakeLists.txt
@@ -6,7 +6,6 @@ add_definitions(
-DFEATURE_EVENT_TRACE=1
-DFEATURE_LOADER_OPTIMIZATION
-DFEATURE_MULTICOREJIT
- -DFEATURE_RANDOMIZED_STRING_HASHING
-DFEATURE_VERSIONING_LOG
)
diff --git a/src/vm/crossgencompile.cpp b/src/vm/crossgencompile.cpp
index 367112e285..c4b9d3dfc3 100644
--- a/src/vm/crossgencompile.cpp
+++ b/src/vm/crossgencompile.cpp
@@ -16,7 +16,6 @@
#include "comdelegate.h"
#include "compile.h"
-#include "security.h"
#include "invokeutil.h"
#include "comcallablewrapper.h"
@@ -436,7 +435,3 @@ BOOL AppDomain::BindingByManifestFile()
{
return FALSE;
}
-
-ReJitManager::ReJitManager()
-{
-}
diff --git a/src/vm/crst.h b/src/vm/crst.h
index a353c6ea44..fa8c307f3f 100644
--- a/src/vm/crst.h
+++ b/src/vm/crst.h
@@ -115,14 +115,15 @@ class CrstBase
friend class Thread;
friend class ThreadStore;
friend class ThreadSuspend;
-friend class ListLock;
-friend class ListLockEntry;
+template <typename ELEMENT>
+friend class ListLockBase;
+template <typename ELEMENT>
+friend class ListLockEntryBase;
//friend class CExecutionEngine;
friend struct SavedExceptionInfo;
friend void EEEnterCriticalSection(CRITSEC_COOKIE cookie);
friend void EELeaveCriticalSection(CRITSEC_COOKIE cookie);
-friend class ReJitPublishMethodHolder;
-friend class ReJitPublishMethodTableHolder;
+friend class CodeVersionManager;
friend class Debugger;
friend class Crst;
diff --git a/src/vm/customattribute.cpp b/src/vm/customattribute.cpp
index 60e002eb71..6c765414c3 100644
--- a/src/vm/customattribute.cpp
+++ b/src/vm/customattribute.cpp
@@ -10,7 +10,6 @@
#include "threads.h"
#include "excep.h"
#include "corerror.h"
-#include "security.h"
#include "classnames.h"
#include "fcall.h"
#include "assemblynative.hpp"
diff --git a/src/vm/dataimage.cpp b/src/vm/dataimage.cpp
index fc584d7b39..4e276fe460 100644
--- a/src/vm/dataimage.cpp
+++ b/src/vm/dataimage.cpp
@@ -738,9 +738,7 @@ FORCEINLINE static CorCompileSection GetSectionForNodeType(ZapNodeType type)
// SECTION_READONLY_WARM
case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE):
- case NodeTypeForItemKind(DataImage::ITEM_VTABLE_CHUNK):
case NodeTypeForItemKind(DataImage::ITEM_INTERFACE_MAP):
- case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY):
case NodeTypeForItemKind(DataImage::ITEM_DISPATCH_MAP):
case NodeTypeForItemKind(DataImage::ITEM_GENERICS_STATIC_FIELDDESCS):
case NodeTypeForItemKind(DataImage::ITEM_GC_STATIC_HANDLES_COLD):
@@ -750,6 +748,10 @@ FORCEINLINE static CorCompileSection GetSectionForNodeType(ZapNodeType type)
case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG_READONLY_WARM):
return CORCOMPILE_SECTION_READONLY_WARM;
+ case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY):
+ case NodeTypeForItemKind(DataImage::ITEM_VTABLE_CHUNK):
+ return CORCOMPILE_SECTION_READONLY_VCHUNKS_AND_DICTIONARY;
+
// SECTION_CLASS_COLD
case NodeTypeForItemKind(DataImage::ITEM_PARAM_TYPEDESC):
case NodeTypeForItemKind(DataImage::ITEM_ARRAY_TYPEDESC):
diff --git a/src/vm/dataimage.h b/src/vm/dataimage.h
index 5d48a710e7..0167ec5762 100644
--- a/src/vm/dataimage.h
+++ b/src/vm/dataimage.h
@@ -309,8 +309,58 @@ public:
void FixupPointerField(PVOID p, SSIZE_T offset);
void FixupRelativePointerField(PVOID p, SSIZE_T offset);
+ template<typename T, typename PT>
+ void FixupPlainOrRelativePointerField(const T *base, const RelativePointer<PT> T::* pPointerFieldMember)
+ {
+ STANDARD_VM_CONTRACT;
+ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
+ FixupRelativePointerField((PVOID)base, offset);
+ }
+
+ template<typename T, typename C, typename PT>
+ void FixupPlainOrRelativePointerField(const T *base, const C T::* pFirstPointerFieldMember, const RelativePointer<PT> C::* pSecondPointerFieldMember)
+ {
+ STANDARD_VM_CONTRACT;
+ const RelativePointer<PT> *ptr = &(base->*pFirstPointerFieldMember.*pSecondPointerFieldMember);
+ SSIZE_T offset = (SSIZE_T) ptr - (SSIZE_T) base;
+ FixupRelativePointerField((PVOID)base, offset);
+ }
+
+ template<typename T, typename PT>
+ void FixupPlainOrRelativePointerField(const T *base, const PlainPointer<PT> T::* pPointerFieldMember)
+ {
+ STANDARD_VM_CONTRACT;
+ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
+ FixupPointerField((PVOID)base, offset);
+ }
+
+ template<typename T, typename C, typename PT>
+ void FixupPlainOrRelativePointerField(const T *base, const C T::* pFirstPointerFieldMember, const PlainPointer<PT> C::* pSecondPointerFieldMember)
+ {
+ STANDARD_VM_CONTRACT;
+ const PlainPointer<PT> *ptr = &(base->*pFirstPointerFieldMember.*pSecondPointerFieldMember);
+ SSIZE_T offset = (SSIZE_T) ptr - (SSIZE_T) base;
+ FixupPointerField((PVOID)base, offset);
+ }
+
void FixupField(PVOID p, SSIZE_T offset, PVOID pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR);
+ template<typename T, typename PT>
+ void FixupPlainOrRelativeField(const T *base, const RelativePointer<PT> T::* pPointerFieldMember, PVOID pTarget, SSIZE_T targetOffset = 0)
+ {
+ STANDARD_VM_CONTRACT;
+ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
+ FixupField((PVOID)base, offset, pTarget, targetOffset, IMAGE_REL_BASED_RELPTR);
+ }
+
+ template<typename T, typename PT>
+ void FixupPlainOrRelativeField(const T *base, const PlainPointer<PT> T::* pPointerFieldMember, PVOID pTarget, SSIZE_T targetOffset = 0)
+ {
+ STANDARD_VM_CONTRACT;
+ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
+ FixupField((PVOID)base, offset, pTarget, targetOffset, IMAGE_REL_BASED_PTR);
+ }
+
void FixupFieldToNode(PVOID p, SSIZE_T offset, ZapNode * pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR);
void FixupFieldToNode(PVOID p, SSIZE_T offset, ZapStoredStructure * pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR)
@@ -318,6 +368,34 @@ public:
return FixupFieldToNode(p, offset, (ZapNode *)pTarget, targetOffset, type);
}
+ template<typename T, typename PT>
+ void FixupPlainOrRelativeFieldToNode(const T *base, const RelativePointer<PT> T::* pPointerFieldMember, ZapNode * pTarget, SSIZE_T targetOffset = 0)
+ {
+ STANDARD_VM_CONTRACT;
+ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
+ FixupFieldToNode((PVOID)base, offset, pTarget, targetOffset, IMAGE_REL_BASED_RELPTR);
+ }
+
+ template<typename T, typename PT>
+ void FixupPlainOrRelativeFieldToNode(const T *base, const RelativePointer<PT> T::* pPointerFieldMember, ZapStoredStructure * pTarget, SSIZE_T targetOffset = 0)
+ {
+ return FixupPlainOrRelativeFieldToNode(base, pPointerFieldMember, (ZapNode *)pTarget, targetOffset);
+ }
+
+ template<typename T, typename PT>
+ void FixupPlainOrRelativeFieldToNode(const T *base, const PlainPointer<PT> T::* pPointerFieldMember, ZapNode * pTarget, SSIZE_T targetOffset = 0)
+ {
+ STANDARD_VM_CONTRACT;
+ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
+ FixupFieldToNode((PVOID)base, offset, pTarget, targetOffset, IMAGE_REL_BASED_PTR);
+ }
+
+ template<typename T, typename PT>
+ void FixupPlainOrRelativeFieldToNode(const T *base, const PlainPointer<PT> T::* pPointerFieldMember, ZapStoredStructure * pTarget, SSIZE_T targetOffset = 0)
+ {
+ return FixupPlainOrRelativeFieldToNode(base, pPointerFieldMember, (ZapNode *)pTarget, targetOffset);
+ }
+
BOOL IsStored(const void *data)
{ WRAPPER_NO_CONTRACT; return m_structures.LookupPtr(data) != NULL; }
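
[Editor's note] The Fixup* templates above all reduce to the same pointer-to-member trick: resolve the member's address inside *base and subtract base to get the byte offset to record. A self-contained illustration; Outer/Inner/RelativePtr are hypothetical stand-ins:

    #include <cstdio>

    template <typename T> struct RelativePtr { T value; };

    struct Inner { int pad; RelativePtr<void*> target; };
    struct Outer { long header; Inner inner; };

    int main()
    {
        Outer o{};
        const Outer* base = &o;
        auto outerField = &Outer::inner;   // Inner Outer::*
        auto innerField = &Inner::target;  // RelativePtr<void*> Inner::*
        // the member-pointer chain resolves to the field's address inside *base
        long long offset =
            (const char*)&(base->*outerField.*innerField) - (const char*)base;
        printf("offset = %lld\n", offset);
        return 0;
    }
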
diff --git a/src/vm/debughelp.cpp b/src/vm/debughelp.cpp
index 376b88cd42..23443ceece 100644
--- a/src/vm/debughelp.cpp
+++ b/src/vm/debughelp.cpp
@@ -318,7 +318,7 @@ MethodDesc* AsMethodDesc(size_t addr)
// extra indirection if the address is tagged (the low bit is set).
// That could AV if we don't check it first.
- if (!ppMT->IsTagged((TADDR)ppMT) || isMemoryReadable((TADDR)ppMT->GetValuePtr((TADDR)ppMT), sizeof(MethodTable*)))
+ if (!ppMT->IsTagged((TADDR)ppMT) || isMemoryReadable((TADDR)ppMT->GetValuePtr(), sizeof(MethodTable*)))
{
if (AsMethodTable((size_t)RelativeFixupPointer<PTR_MethodTable>::GetValueAtPtr((TADDR)ppMT)) != 0)
{
diff --git a/src/vm/dispatchinfo.cpp b/src/vm/dispatchinfo.cpp
index ee29506d27..492603da05 100644
--- a/src/vm/dispatchinfo.cpp
+++ b/src/vm/dispatchinfo.cpp
@@ -28,7 +28,6 @@
#include "olevariant.h"
#include "commtmemberinfomap.h"
#include "dispparammarshaler.h"
-#include "security.h"
#include "reflectioninvocation.h"
#include "dbginterface.h"
@@ -1588,50 +1587,6 @@ void DispatchInfo::InvokeMemberWorker(DispatchMemberInfo* pDispMemberInfo,
pObjs->MemberInfo = ObjectFromHandle(pDispMemberInfo->m_hndMemberInfo);
MemberType = pDispMemberInfo->GetMemberType();
- // Determine whether the member has a link time security check. If so we
- // need to emulate this (since the caller is obviously not jitted in this
- // case). Only methods and properties can have a link time check.
- MethodDesc *pMDforSecurity = NULL;
-
- if (MemberType == Method)
- {
- MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &pObjs->MemberInfo);
- ARG_SLOT arg = ObjToArgSlot(pObjs->MemberInfo);
- pMDforSecurity = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&arg);
- }
- else if (MemberType == Property)
- {
- MethodDescCallSite getSetter(METHOD__PROPERTY__GET_SETTER, &pObjs->MemberInfo);
- ARG_SLOT args[] =
- {
- ObjToArgSlot(pObjs->MemberInfo),
- BoolToArgSlot(false)
- };
- OBJECTREF method = getSetter.Call_RetOBJECTREF(args);
- if (method == NULL)
- {
- MethodDescCallSite getGetter(METHOD__PROPERTY__GET_GETTER, &pObjs->MemberInfo);
- ARG_SLOT args1[] =
- {
- ObjToArgSlot(pObjs->MemberInfo),
- BoolToArgSlot(false)
- };
- method = getGetter.Call_RetOBJECTREF(args1);
- }
-
- if (method != NULL)
- {
- GCPROTECT_BEGIN(method)
- MethodDescCallSite getMethodHandle(METHOD__METHOD_BASE__GET_METHODDESC, &method);
- ARG_SLOT arg = ObjToArgSlot(method);
- pMDforSecurity = (MethodDesc*) getMethodHandle.Call_RetLPVOID(&arg);
- GCPROTECT_END();
- }
- }
-
- if (pMDforSecurity)
- Security::CheckLinkDemandAgainstAppDomain(pMDforSecurity);
-
switch (MemberType)
{
case Field:
diff --git a/src/vm/dllimport.cpp b/src/vm/dllimport.cpp
index 49c7d7a8b8..e7857e412d 100644
--- a/src/vm/dllimport.cpp
+++ b/src/vm/dllimport.cpp
@@ -19,7 +19,6 @@
#include "dllimport.h"
#include "method.hpp"
#include "siginfo.hpp"
-#include "security.h"
#include "comdelegate.h"
#include "ceeload.h"
#include "mlinfo.h"
@@ -1183,7 +1182,6 @@ public:
#endif // FEATURE_COMINTEROP
LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING, " NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING\n", facility, level);
LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL, " NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL\n", facility, level);
- LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_HASDECLARATIVESECURITY, " NDIRECTSTUB_FL_HASDECLARATIVESECURITY\n", facility, level);
LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_UNMANAGED_CALLI, " NDIRECTSTUB_FL_UNMANAGED_CALLI\n", facility, level);
LogOneFlag(dwStubFlags, NDIRECTSTUB_FL_TRIGGERCCTOR, " NDIRECTSTUB_FL_TRIGGERCCTOR\n", facility, level);
#ifdef FEATURE_COMINTEROP
@@ -1214,7 +1212,6 @@ public:
NDIRECTSTUB_FL_REVERSE_INTEROP |
NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING |
NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL |
- NDIRECTSTUB_FL_HASDECLARATIVESECURITY |
NDIRECTSTUB_FL_UNMANAGED_CALLI |
NDIRECTSTUB_FL_TRIGGERCCTOR |
#ifdef FEATURE_COMINTEROP
@@ -5010,44 +5007,7 @@ MethodDesc* NDirect::CreateCLRToNativeILStub(
pParamTokenArray = (mdParamDef*)_alloca(numParamTokens * sizeof(mdParamDef));
CollateParamTokens(pModule->GetMDImport(), pSigDesc->m_tkMethodDef, numArgs, pParamTokenArray);
- // for interop vectors that have declarative security, we need
- // to update the stub flags to ensure a unique stub hash
- // is generated based on the marshalling signature AND
- // any declarative security.
- // IMPORTANT: This will only inject the security callouts for
- // interop functionality which has a non-null target MethodDesc.
- // Currently, this is known to exclude things like native
- // function ptrs. It is assumed that if the target is not
- // attribute'able for metadata, then it cannot have declarative
- // security - and that the target is not attributable if it was
- // not passed to this function.
MethodDesc *pMD = pSigDesc->m_pMD;
- if (pMD != NULL && SF_IsForwardStub(dwStubFlags))
- {
- // In an AppX process there is only one fully trusted AppDomain, so there is never any need to insert
- // a security callout on the stubs.
- if (!AppX::IsAppXProcess())
- {
-#ifdef FEATURE_COMINTEROP
- if (pMD->IsComPlusCall() || pMD->IsGenericComPlusCall())
- {
- // To preserve Whidbey behavior, we only enforce the implicit demand for
- // unmanaged code permission.
- MethodTable* pMT = ComPlusCallInfo::FromMethodDesc(pMD)->m_pInterfaceMT;
- if (pMT->ClassRequiresUnmanagedCodeCheck() &&
- !pMD->HasSuppressUnmanagedCodeAccessAttr())
- {
- dwStubFlags |= NDIRECTSTUB_FL_HASDECLARATIVESECURITY;
- }
- }
- else
-#endif // FEATURE_COMPINTEROP
- if (pMD->IsInterceptedForDeclSecurity())
- {
- dwStubFlags |= NDIRECTSTUB_FL_HASDECLARATIVESECURITY;
- }
- }
- }
NewHolder<ILStubState> pStubState;
@@ -5413,8 +5373,7 @@ PCODE JitILStub(MethodDesc* pStubMD)
// A dynamically generated IL stub
//
- CORJIT_FLAGS jitFlags = pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
- pCode = pStubMD->MakeJitWorker(NULL, jitFlags);
+ pCode = pStubMD->PrepareInitialCode();
_ASSERTE(pCode == pStubMD->GetNativeCode());
}
diff --git a/src/vm/dllimport.h b/src/vm/dllimport.h
index c918f58651..058484c45e 100644
--- a/src/vm/dllimport.h
+++ b/src/vm/dllimport.h
@@ -161,7 +161,7 @@ enum NDirectStubFlags
#endif // FEATURE_COMINTEROP
NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING = 0x00000100,
NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL = 0x00000200,
- NDIRECTSTUB_FL_HASDECLARATIVESECURITY = 0x00000400,
+ // unused = 0x00000400,
NDIRECTSTUB_FL_UNMANAGED_CALLI = 0x00000800,
NDIRECTSTUB_FL_TRIGGERCCTOR = 0x00001000,
#ifdef FEATURE_COMINTEROP
@@ -223,7 +223,6 @@ inline bool SF_IsHRESULTSwapping (DWORD dwStubFlags) { LIMITED_METHOD_CONT
inline bool SF_IsReverseStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_REVERSE_INTEROP)); }
inline bool SF_IsNGENedStubForProfiling(DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_NGENEDSTUBFORPROFILING)); }
inline bool SF_IsDebuggableStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL)); }
-inline bool SF_IsStubWithDemand (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_HASDECLARATIVESECURITY)); }
inline bool SF_IsCALLIStub (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_UNMANAGED_CALLI)); }
inline bool SF_IsStubWithCctorTrigger (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_TRIGGERCCTOR)); }
inline bool SF_IsForNumParamBytes (DWORD dwStubFlags) { LIMITED_METHOD_CONTRACT; return (dwStubFlags < NDIRECTSTUB_FL_INVALID && 0 != (dwStubFlags & NDIRECTSTUB_FL_FOR_NUMPARAMBYTES)); }
@@ -299,10 +298,6 @@ inline void SF_ConsistencyCheck(DWORD dwStubFlags)
CONSISTENCY_CHECK(!(SF_IsFieldGetterStub(dwStubFlags) && !SF_IsHRESULTSwapping(dwStubFlags)));
CONSISTENCY_CHECK(!(SF_IsFieldSetterStub(dwStubFlags) && !SF_IsHRESULTSwapping(dwStubFlags)));
- // Reverse and CALLI stubs don't have demands
- CONSISTENCY_CHECK(!(SF_IsReverseStub(dwStubFlags) && SF_IsStubWithDemand(dwStubFlags)));
- CONSISTENCY_CHECK(!(SF_IsCALLIStub(dwStubFlags) && SF_IsStubWithDemand(dwStubFlags)));
-
// Delegate stubs are not COM
CONSISTENCY_CHECK(!(SF_IsDelegateStub(dwStubFlags) && SF_IsCOMStub(dwStubFlags)));
}
diff --git a/src/vm/dllimportcallback.cpp b/src/vm/dllimportcallback.cpp
index 90c01a496b..8684c12167 100644
--- a/src/vm/dllimportcallback.cpp
+++ b/src/vm/dllimportcallback.cpp
@@ -1111,13 +1111,8 @@ UMEntryThunk* UMEntryThunk::CreateUMEntryThunk()
UMEntryThunk * p;
-#ifdef FEATURE_WINDOWSPHONE
// On the phone, use loader heap to save memory commit of regular executable heap
p = (UMEntryThunk *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(sizeof(UMEntryThunk)));
-#else
- p = new (executable) UMEntryThunk;
- memset (p, 0, sizeof(*p));
-#endif
RETURN p;
}
@@ -1126,11 +1121,10 @@ void UMEntryThunk::Terminate()
{
WRAPPER_NO_CONTRACT;
-#ifdef FEATURE_WINDOWSPHONE
+ _ASSERTE(!SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->IsZeroInit());
+ m_code.Poison();
+
SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->BackoutMem(this, sizeof(UMEntryThunk));
-#else
- DeleteExecutable(this);
-#endif
}
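
[Editor's note] The Poison() call above takes over from the debug-only 0xCC fill removed from the destructor in dllimportcallback.h below. A hedged sketch of the general poison-on-free idea (hypothetical helper, x86-specific opcode):

    #include <cstring>

    void PoisonFreedThunk(void* p, size_t size)
    {
        // 0xCC is the x86 int3 opcode: a stale native callback that jumps into
        // freed thunk memory traps immediately instead of executing garbage
        memset(p, 0xCC, size);
    }
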
VOID UMEntryThunk::FreeUMEntryThunk(UMEntryThunk* p)
diff --git a/src/vm/dllimportcallback.h b/src/vm/dllimportcallback.h
index af2a0b1d92..e79c5f03ef 100644
--- a/src/vm/dllimportcallback.h
+++ b/src/vm/dllimportcallback.h
@@ -326,10 +326,6 @@ public:
{
DestroyLongWeakHandle(GetObjectHandle());
}
-
-#ifdef _DEBUG
- FillMemory(this, sizeof(*this), 0xcc);
-#endif
}
void Terminate();
diff --git a/src/vm/domainfile.cpp b/src/vm/domainfile.cpp
index 32f35fd39a..e5736b7282 100644
--- a/src/vm/domainfile.cpp
+++ b/src/vm/domainfile.cpp
@@ -16,7 +16,6 @@
#include <shlwapi.h>
-#include "security.h"
#include "invokeutil.h"
#include "eeconfig.h"
#include "dynamicmethod.h"
@@ -1291,10 +1290,6 @@ void DomainFile::Activate()
m_bDisableActivationCheck=TRUE;
pMT->CheckRunClassInitThrowing();
}
- if (g_pConfig->VerifyModulesOnLoad())
- {
- m_pModule->VerifyAllMethods();
- }
#ifdef _DEBUG
if (g_pConfig->ExpandModulesOnLoad())
{
diff --git a/src/vm/dynamicmethod.cpp b/src/vm/dynamicmethod.cpp
index acfea3e7f6..2d0fa9ce56 100644
--- a/src/vm/dynamicmethod.cpp
+++ b/src/vm/dynamicmethod.cpp
@@ -11,7 +11,6 @@
#include "object.h"
#include "method.hpp"
#include "comdelegate.h"
-#include "security.h"
#include "field.h"
#include "contractimpl.h"
#include "nibblemapmacros.h"
diff --git a/src/vm/dynamicmethod.h b/src/vm/dynamicmethod.h
index f9a92b0af0..7fd63e59b9 100644
--- a/src/vm/dynamicmethod.h
+++ b/src/vm/dynamicmethod.h
@@ -287,7 +287,7 @@ private:
public:
// Space for header is reserved immediately before. It is not included in size.
virtual void* AllocMemForCode_NoThrow(size_t header, size_t size, DWORD alignment) DAC_EMPTY_RET(NULL);
-
+
virtual ~HostCodeHeap() DAC_EMPTY();
LoaderAllocator* GetAllocator() { return m_pAllocator; }
@@ -307,6 +307,11 @@ protected:
void FreeMemForCode(void * codeStart);
+#if defined(FEATURE_JIT_PITCHING)
+public:
+ PTR_EEJitManager GetJitManager() { return m_pJitManager; }
+#endif
+
}; // class HostCodeHeap
//---------------------------------------------------------------------------------------
diff --git a/src/vm/ecall.cpp b/src/vm/ecall.cpp
index f3b0099e57..6f5f11b894 100644
--- a/src/vm/ecall.cpp
+++ b/src/vm/ecall.cpp
@@ -36,6 +36,7 @@ static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 1 == METHOD__STRING__CTORF_CH
static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 2 == METHOD__STRING__CTORF_CHAR_COUNT);
static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 3 == METHOD__STRING__CTORF_CHARPTR);
static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 4 == METHOD__STRING__CTORF_CHARPTR_START_LEN);
+static_assert_no_msg(METHOD__STRING__CTORF_FIRST + 5 == METHOD__STRING__CTORF_READONLYSPANOFCHAR);
// ECall::CtorCharXxx has to be in same order as METHOD__STRING__CTORF_XXX
#define ECallCtor_First ECall::CtorCharArrayManaged
@@ -44,8 +45,9 @@ static_assert_no_msg(ECallCtor_First + 1 == ECall::CtorCharArrayStartLengthManag
static_assert_no_msg(ECallCtor_First + 2 == ECall::CtorCharCountManaged);
static_assert_no_msg(ECallCtor_First + 3 == ECall::CtorCharPtrManaged);
static_assert_no_msg(ECallCtor_First + 4 == ECall::CtorCharPtrStartLengthManaged);
+static_assert_no_msg(ECallCtor_First + 5 == ECall::CtorReadOnlySpanOfCharManaged);
-#define NumberOfStringConstructors 5
+#define NumberOfStringConstructors 6
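
[Editor's note] The static_assert_no_msg lines above keep the METHOD__STRING__CTORF_* ids, the ECall::Ctor* ids, and the constructor count locked together at compile time. A generic sketch of the same pattern with hypothetical names:

    enum StringCtorId { Ctor_CharArray, Ctor_CharPtr, Ctor_Span, StringCtorCount };
    static const char* const g_ctorNames[] = { "CharArray", "CharPtr", "Span" };
    static_assert(sizeof(g_ctorNames) / sizeof(g_ctorNames[0]) == StringCtorCount,
                  "string-constructor tables out of sync");
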
void ECall::PopulateManagedStringConstructors()
{
@@ -557,10 +559,6 @@ LPVOID ECall::GetQCallImpl(MethodDesc * pMD)
("%s::%s is not registered using QCFuncElement macro in ecall.cpp",
pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
- CONSISTENCY_CHECK_MSGF(pMD->HasSuppressUnmanagedCodeAccessAttr(),
- ("%s::%s is not marked with SuppressUnmanagedCodeSecurityAttribute()",
- pMD->m_pszDebugClassName, pMD->m_pszDebugMethodName));
-
DWORD dwAttrs = pMD->GetAttrs();
BOOL fPublicOrProtected = IsMdPublic(dwAttrs) || IsMdFamily(dwAttrs) || IsMdFamORAssem(dwAttrs);
diff --git a/src/vm/ecall.h b/src/vm/ecall.h
index c4fed1ff42..26fa9eb478 100644
--- a/src/vm/ecall.h
+++ b/src/vm/ecall.h
@@ -110,6 +110,7 @@ class ECall
DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorCharCountManaged, NULL) \
DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorCharPtrManaged, NULL) \
DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorCharPtrStartLengthManaged, NULL) \
+ DYNAMICALLY_ASSIGNED_FCALL_IMPL(CtorReadOnlySpanOfCharManaged, NULL) \
DYNAMICALLY_ASSIGNED_FCALL_IMPL(InternalGetCurrentThread, NULL) \
enum
diff --git a/src/vm/ecalllist.h b/src/vm/ecalllist.h
index 39ba874b5a..76be0b172c 100644
--- a/src/vm/ecalllist.h
+++ b/src/vm/ecalllist.h
@@ -109,6 +109,7 @@ FCFuncStart(gStringFuncs)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrChar_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorCharPtrManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrChar_Int_Int_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorCharPtrStartLengthManaged)
FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_Char_Int_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorCharCountManaged)
+ FCDynamicSig(COR_CTOR_METHOD_NAME, &gsig_IM_ReadOnlySpanOfChar_RetVoid, CORINFO_INTRINSIC_Illegal, ECall::CtorReadOnlySpanOfCharManaged)
FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_RetVoid, COMString::StringInitCharPtr)
FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_Int_Int_RetVoid, COMString::StringInitCharPtrPartial)
FCFuncElementSig(COR_CTOR_METHOD_NAME, &gsig_IM_PtrSByt_Int_Int_Encoding_RetVoid, COMString::StringInitSBytPtrPartialEx)
@@ -118,17 +119,14 @@ FCFuncStart(gStringFuncs)
FCIntrinsic("get_Chars", COMString::GetCharAt, CORINFO_INTRINSIC_StringGetChar)
FCFuncElement("IsAscii", COMString::IsAscii)
FCFuncElement("CompareOrdinalHelper", COMString::CompareOrdinalEx)
- FCFuncElement("IndexOfAny", COMString::IndexOfCharArray)
+ FCFuncElement("IndexOfCharArray", COMString::IndexOfCharArray)
FCFuncElement("LastIndexOfAny", COMString::LastIndexOfCharArray)
FCFuncElementSig("ReplaceInternal", &gsig_IM_Str_Str_RetStr, COMString::ReplaceString)
#ifdef FEATURE_COMINTEROP
FCFuncElement("SetTrailByte", COMString::FCSetTrailByte)
FCFuncElement("TryGetTrailByte", COMString::FCTryGetTrailByte)
#endif // FEATURE_COMINTEROP
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
FCFuncElement("InternalMarvin32HashString", COMString::Marvin32HashString)
- QCFuncElement("InternalUseRandomizedHashing", COMString::UseRandomizedHashing)
-#endif // FEATURE_RANDOMIZED_STRING_HASHING
FCFuncEnd()
FCFuncStart(gStringBufferFuncs)
@@ -296,6 +294,7 @@ FCFuncStart(gCOMTypeHandleFuncs)
FCFuncElement("IsComObject", RuntimeTypeHandle::IsComObject)
FCFuncElement("IsValueType", RuntimeTypeHandle::IsValueType)
FCFuncElement("IsInterface", RuntimeTypeHandle::IsInterface)
+ FCFuncElement("IsByRefLike", RuntimeTypeHandle::IsByRefLike)
QCFuncElement("_IsVisible", RuntimeTypeHandle::IsVisible)
QCFuncElement("ConstructName", RuntimeTypeHandle::ConstructName)
FCFuncElement("CanCastTo", RuntimeTypeHandle::CanCastTo)
@@ -343,10 +342,6 @@ FCFuncStart(gMetaDataImport)
FCFuncElement("_GetMarshalAs", MetaDataImport::GetMarshalAs)
FCFuncEnd()
-FCFuncStart(gRuntimeFieldInfoFuncs)
- FCFuncElement("PerformVisibilityCheckOnField", ReflectionInvocation::PerformVisibilityCheckOnField)
-FCFuncEnd()
-
FCFuncStart(gSignatureNative)
FCFuncElement("GetSignature", SignatureNative::GetSignature)
FCFuncElement("GetCustomModifiers", SignatureNative::GetCustomModifiers)
@@ -369,6 +364,7 @@ FCFuncStart(gRuntimeMethodHandle)
QCFuncElement("GetMethodInstantiation", RuntimeMethodHandle::GetMethodInstantiation)
FCFuncElement("HasMethodInstantiation", RuntimeMethodHandle::HasMethodInstantiation)
FCFuncElement("IsGenericMethodDefinition", RuntimeMethodHandle::IsGenericMethodDefinition)
+ FCFuncElement("GetGenericParameterCount", RuntimeMethodHandle::GetGenericParameterCount)
FCFuncElement("IsTypicalMethodDefinition", RuntimeMethodHandle::IsTypicalMethodDefinition)
QCFuncElement("GetTypicalMethodDefinition", RuntimeMethodHandle::GetTypicalMethodDefinition)
QCFuncElement("StripMethodInstantiation", RuntimeMethodHandle::StripMethodInstantiation)
@@ -572,6 +568,7 @@ FCFuncStart(gAssemblyFuncs)
FCFuncElement("FCallIsDynamic", AssemblyNative::IsDynamic)
FCFuncElement("nLoad", AssemblyNative::Load)
QCFuncElement("GetType", AssemblyNative::GetType)
+ QCFuncElement("GetForwardedType", AssemblyNative::GetForwardedType)
QCFuncElement("GetManifestResourceInfo", AssemblyNative::GetManifestResourceInfo)
QCFuncElement("GetModules", AssemblyNative::GetModules)
QCFuncElement("GetModule", AssemblyNative::GetModule)
@@ -713,6 +710,7 @@ FCFuncStart(gRuntimeThreadFuncs)
#endif // FEATURE_COMINTEROP
FCFuncElement("InterruptInternal", ThreadNative::Interrupt)
FCFuncElement("JoinInternal", ThreadNative::Join)
+ QCFuncElement("GetOptimalMaxSpinWaitsPerSpinIterationInternal", ThreadNative::GetOptimalMaxSpinWaitsPerSpinIteration)
FCFuncEnd()
FCFuncStart(gThreadFuncs)
@@ -1246,6 +1244,7 @@ FCFuncStart(gEventPipeInternalFuncs)
QCFuncElement("DefineEvent", EventPipeInternal::DefineEvent)
QCFuncElement("DeleteProvider", EventPipeInternal::DeleteProvider)
QCFuncElement("WriteEvent", EventPipeInternal::WriteEvent)
+ QCFuncElement("WriteEventData", EventPipeInternal::WriteEventData)
FCFuncEnd()
#endif // FEATURE_PERFTRACING
@@ -1412,7 +1411,6 @@ FCClassElement("RegistrationServices", "System.Runtime.InteropServices", gRegist
#endif // FEATURE_COMINTEROP_MANAGED_ACTIVATION
#endif // FEATURE_COMINTEROP
-FCClassElement("RtFieldInfo", "System.Reflection", gRuntimeFieldInfoFuncs)
FCClassElement("RuntimeAssembly", "System.Reflection", gAssemblyFuncs)
#ifdef FEATURE_COMINTEROP
FCClassElement("RuntimeClass", "System.Runtime.InteropServices.WindowsRuntime", gRuntimeClassFuncs)
diff --git a/src/vm/eeconfig.cpp b/src/vm/eeconfig.cpp
index 05cdd0aa6c..4c49d1457f 100644
--- a/src/vm/eeconfig.cpp
+++ b/src/vm/eeconfig.cpp
@@ -241,7 +241,6 @@ HRESULT EEConfig::Init()
INDEBUG(fStressLog = true;)
- fVerifyAllOnLoad = false;
#ifdef _DEBUG
fExpandAllOnLoad = false;
fDebuggable = false;
@@ -382,6 +381,14 @@ HRESULT EEConfig::Init()
fTieredCompilation = false;
#endif
+#if defined(FEATURE_GDBJIT) && defined(_DEBUG)
+ pszGDBJitElfDump = NULL;
+#endif // FEATURE_GDBJIT && _DEBUG
+
+#if defined(FEATURE_GDBJIT_FRAME)
+ fGDBJitEmitDebugFrame = false;
+#endif
+
// After initialization, register the code:#GetConfigValueCallback method with code:CLRConfig to let
// CLRConfig access config files. This is needed because CLRConfig lives outside the VM and can't
// statically link to EEConfig.
@@ -1096,9 +1103,6 @@ HRESULT EEConfig::sync()
fEnableRCWCleanupOnSTAShutdown = (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnableRCWCleanupOnSTAShutdown) != 0);
#endif // FEATURE_COMINTEROP
- //Eager verification of all assemblies.
- fVerifyAllOnLoad = (GetConfigDWORD_DontUse_(CLRConfig::EXTERNAL_VerifyAllOnLoad, fVerifyAllOnLoad) != 0);
-
#ifdef _DEBUG
fExpandAllOnLoad = (GetConfigDWORD_DontUse_(CLRConfig::INTERNAL_ExpandAllOnLoad, fExpandAllOnLoad) != 0);
#endif //_DEBUG
@@ -1240,6 +1244,17 @@ HRESULT EEConfig::sync()
fTieredCompilation = CLRConfig::GetConfigValue(CLRConfig::UNSUPPORTED_TieredCompilation) != 0;
#endif
+#if defined(FEATURE_GDBJIT) && defined(_DEBUG)
+ {
+ LPWSTR pszGDBJitElfDumpW = NULL;
+ CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GDBJitElfDump, &pszGDBJitElfDumpW);
+ pszGDBJitElfDump = NarrowWideChar(pszGDBJitElfDumpW);
+ }
+#endif // FEATURE_GDBJIT && _DEBUG
+
+#if defined(FEATURE_GDBJIT_FRAME)
+ fGDBJitEmitDebugFrame = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_GDBJitEmitDebugFrame) != 0;
+#endif
return hr;
}
diff --git a/src/vm/eeconfig.h b/src/vm/eeconfig.h
index ccd5cd28bd..c55ba06a0b 100644
--- a/src/vm/eeconfig.h
+++ b/src/vm/eeconfig.h
@@ -286,6 +286,21 @@ public:
bool TieredCompilation(void) const {LIMITED_METHOD_CONTRACT; return fTieredCompilation; }
#endif
+#if defined(FEATURE_GDBJIT) && defined(_DEBUG)
+ inline bool ShouldDumpElfOnMethod(LPCUTF8 methodName) const
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(methodName, NULL_OK));
+ } CONTRACTL_END
+ return RegexOrExactMatch(pszGDBJitElfDump, methodName);
+ }
+#endif // FEATURE_GDBJIT && _DEBUG
+
+#if defined(FEATURE_GDBJIT_FRAME)
+ inline bool ShouldEmitDebugFrame(void) const {LIMITED_METHOD_CONTRACT; return fGDBJitEmitDebugFrame;}
+#endif // FEATURE_GDBJIT_FRAME
BOOL PInvokeRestoreEsp(BOOL fDefault) const
{
LIMITED_METHOD_CONTRACT;
@@ -474,7 +489,6 @@ public:
}
#endif // FEATURE_COMINTEROP
- bool VerifyModulesOnLoad(void) const { LIMITED_METHOD_CONTRACT; return fVerifyAllOnLoad; }
#ifdef _DEBUG
bool ExpandModulesOnLoad(void) const { LIMITED_METHOD_CONTRACT; return fExpandAllOnLoad; }
#endif //_DEBUG
@@ -934,8 +948,6 @@ private: //----------------------------------------------------------------
bool m_fDeveloperInstallation; // We are on a developers machine
bool fAppDomainUnload; // Enable appdomain unloading
- bool fVerifyAllOnLoad; // True if we want to verify all methods in an assembly at load time.
-
DWORD dwADURetryCount;
#ifdef _DEBUG
@@ -1101,6 +1113,13 @@ private: //----------------------------------------------------------------
bool fTieredCompilation;
#endif
+#if defined(FEATURE_GDBJIT) && defined(_DEBUG)
+ LPCUTF8 pszGDBJitElfDump;
+#endif // FEATURE_GDBJIT && _DEBUG
+
+#if defined(FEATURE_GDBJIT_FRAME)
+ bool fGDBJitEmitDebugFrame;
+#endif
public:
HRESULT GetConfiguration_DontUse_(__in_z LPCWSTR pKey, ConfigSearch direction, __deref_out_opt LPCWSTR* value);
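ShouldDumpElfOnMethod reads a pattern cached once during EEConfig::sync() and matches it per method. A minimal standalone model of that gating, assuming an environment variable named GDBJitElfDump and substituting exact comparison for RegexOrExactMatch:

    #include <cstdlib>
    #include <cstring>

    // Cached once at startup, mirroring how sync() stores pszGDBJitElfDump.
    static const char* g_elfDumpPattern = std::getenv("GDBJitElfDump");

    static bool ShouldDumpElfOnMethod(const char* methodName)
    {
        if (g_elfDumpPattern == nullptr || methodName == nullptr)
            return false;
        // The runtime also accepts a regex here; exact match keeps the sketch small.
        return std::strcmp(g_elfDumpPattern, methodName) == 0;
    }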
diff --git a/src/vm/eventpipe.cpp b/src/vm/eventpipe.cpp
index 9a94923493..eebd2744a4 100644
--- a/src/vm/eventpipe.cpp
+++ b/src/vm/eventpipe.cpp
@@ -2,6 +2,8 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
+#include "clrtypes.h"
+#include "safemath.h"
#include "common.h"
#include "eventpipe.h"
#include "eventpipebuffermanager.h"
@@ -38,6 +40,146 @@ extern "C" void InitProvidersAndEvents();
extern "C" void InitProvidersAndEvents();
#endif
+EventPipeEventPayload::EventPipeEventPayload(BYTE *pData, unsigned int length)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pData = pData;
+ m_pEventData = NULL;
+ m_eventDataCount = 0;
+ m_allocatedData = false;
+
+ m_size = length;
+}
+
+EventPipeEventPayload::EventPipeEventPayload(EventData **pEventData, unsigned int eventDataCount)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ m_pData = NULL;
+ m_pEventData = pEventData;
+ m_eventDataCount = eventDataCount;
+ m_allocatedData = false;
+
+ S_UINT32 tmp_size = S_UINT32(0);
+ for (unsigned int i=0; i<m_eventDataCount; i++)
+ {
+ tmp_size += S_UINT32((*m_pEventData)[i].Size);
+ }
+
+ if (tmp_size.IsOverflow())
+ {
+ // If there is an overflow, drop the data and create an empty payload
+ m_pEventData = NULL;
+ m_eventDataCount = 0;
+ m_size = 0;
+ }
+ else
+ {
+ m_size = tmp_size.Value();
+ }
+}
+
+EventPipeEventPayload::~EventPipeEventPayload()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_allocatedData && m_pData != NULL)
+ {
+ delete[] m_pData;
+ m_pData = NULL;
+ }
+}
+
+void EventPipeEventPayload::Flatten()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_size > 0)
+ {
+ if (!IsFlattened())
+ {
+ BYTE* tmp_pData = new (nothrow) BYTE[m_size];
+ if (tmp_pData != NULL)
+ {
+ m_allocatedData = true;
+ CopyData(tmp_pData);
+ m_pData = tmp_pData;
+ }
+ }
+ }
+}
+
+void EventPipeEventPayload::CopyData(BYTE *pDst)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_size > 0)
+ {
+ if(IsFlattened())
+ {
+ memcpy(pDst, m_pData, m_size);
+ }
+
+ else if(m_pEventData != NULL)
+ {
+ unsigned int offset = 0;
+ for(unsigned int i=0; i<m_eventDataCount; i++)
+ {
+ memcpy(pDst + offset, (BYTE*)(*m_pEventData)[i].Ptr, (*m_pEventData)[i].Size);
+ offset += (*m_pEventData)[i].Size;
+ }
+ }
+ }
+}
+
+BYTE* EventPipeEventPayload::GetFlatData()
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if (!IsFlattened())
+ {
+ Flatten();
+ }
+ return m_pData;
+}
+
void EventPipe::Initialize()
{
STANDARD_VM_CONTRACT;
@@ -238,7 +380,7 @@ bool EventPipe::Enabled()
return enabled;
}
-EventPipeProvider* EventPipe::CreateProvider(const GUID &providerID, EventPipeCallback pCallbackFunction, void *pCallbackData)
+EventPipeProvider* EventPipe::CreateProvider(const SString &providerName, EventPipeCallback pCallbackFunction, void *pCallbackData)
{
CONTRACTL
{
@@ -248,7 +390,7 @@ EventPipeProvider* EventPipe::CreateProvider(const GUID &providerID, EventPipeCa
}
CONTRACTL_END;
- return new EventPipeProvider(providerID, pCallbackFunction, pCallbackData);
+ return new EventPipeProvider(providerName, pCallbackFunction, pCallbackData);
}
void EventPipe::DeleteProvider(EventPipeProvider *pProvider)
@@ -289,6 +431,34 @@ void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int leng
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EventPipeEventPayload payload(pData, length);
+ EventPipe::WriteEventInternal(event, payload, pActivityId, pRelatedActivityId);
+}
+
+void EventPipe::WriteEvent(EventPipeEvent &event, EventData **pEventData, unsigned int eventDataCount, LPCGUID pActivityId, LPCGUID pRelatedActivityId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ EventPipeEventPayload payload(pEventData, eventDataCount);
+ EventPipe::WriteEventInternal(event, payload, pActivityId, pRelatedActivityId);
+}
+
+void EventPipe::WriteEventInternal(EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId, LPCGUID pRelatedActivityId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
PRECONDITION(s_pBufferManager != NULL);
}
CONTRACTL_END;
@@ -309,7 +479,7 @@ void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int leng
if(!s_pConfig->RundownEnabled() && s_pBufferManager != NULL)
{
- if(!s_pBufferManager->WriteEvent(pThread, event, pData, length, pActivityId, pRelatedActivityId))
+ if(!s_pBufferManager->WriteEvent(pThread, event, payload, pActivityId, pRelatedActivityId))
{
// This is used in DEBUG to make sure that we don't log an event synchronously that we didn't log to the buffer.
return;
@@ -317,19 +487,23 @@ void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int leng
}
else if(s_pConfig->RundownEnabled())
{
- // Write synchronously to the file.
- // We're under lock and blocking the disabling thread.
- EventPipeEventInstance instance(
- event,
- pThread->GetOSThreadId(),
- pData,
- length,
- pActivityId,
- pRelatedActivityId);
-
- if(s_pFile != NULL)
+ BYTE *pData = payload.GetFlatData();
+ if (pData != NULL)
{
- s_pFile->WriteEvent(instance);
+ // Write synchronously to the file.
+ // We're under lock and blocking the disabling thread.
+ EventPipeEventInstance instance(
+ event,
+ pThread->GetOSThreadId(),
+ pData,
+ payload.GetSize(),
+ pActivityId,
+ pRelatedActivityId);
+
+ if(s_pFile != NULL)
+ {
+ s_pFile->WriteEvent(instance);
+ }
}
}
@@ -337,25 +511,29 @@ void EventPipe::WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int leng
{
GCX_PREEMP();
- // Create an instance of the event for the synchronous path.
- EventPipeEventInstance instance(
- event,
- pThread->GetOSThreadId(),
- pData,
- length,
- pActivityId,
- pRelatedActivityId);
-
- // Write to the EventPipeFile if it exists.
- if(s_pSyncFile != NULL)
+ BYTE *pData = payload.GetFlatData();
+ if (pData != NULL)
{
- s_pSyncFile->WriteEvent(instance);
- }
+ // Create an instance of the event for the synchronous path.
+ EventPipeEventInstance instance(
+ event,
+ pThread->GetOSThreadId(),
+ pData,
+ payload.GetSize(),
+ pActivityId,
+ pRelatedActivityId);
+
+ // Write to the EventPipeFile if it exists.
+ if(s_pSyncFile != NULL)
+ {
+ s_pSyncFile->WriteEvent(instance);
+ }
- // Write to the EventPipeJsonFile if it exists.
- if(s_pJsonFile != NULL)
- {
- s_pJsonFile->WriteEvent(instance);
+ // Write to the EventPipeJsonFile if it exists.
+ if(s_pJsonFile != NULL)
+ {
+ s_pJsonFile->WriteEvent(instance);
+ }
}
}
#endif // _DEBUG
@@ -371,12 +549,14 @@ void EventPipe::WriteSampleProfileEvent(Thread *pSamplingThread, EventPipeEvent
}
CONTRACTL_END;
+ EventPipeEventPayload payload(pData, length);
+
// Write the event to the thread's buffer.
if(s_pBufferManager != NULL)
{
// Specify the sampling thread as the "current thread", so that we select the right buffer.
// Specify the target thread so that the event gets properly attributed.
- if(!s_pBufferManager->WriteEvent(pSamplingThread, *pEvent, pData, length, NULL /* pActivityId */, NULL /* pRelatedActivityId */, pTargetThread, &stackContents))
+ if(!s_pBufferManager->WriteEvent(pSamplingThread, *pEvent, payload, NULL /* pActivityId */, NULL /* pRelatedActivityId */, pTargetThread, &stackContents))
{
// This is used in DEBUG to make sure that we don't log an event synchronously that we didn't log to the buffer.
return;
@@ -520,7 +700,7 @@ void QCALLTYPE EventPipeInternal::Disable()
}
INT_PTR QCALLTYPE EventPipeInternal::CreateProvider(
- GUID providerID,
+ __in_z LPCWSTR providerName,
EventPipeCallback pCallbackFunc)
{
QCALL_CONTRACT;
@@ -529,7 +709,7 @@ INT_PTR QCALLTYPE EventPipeInternal::CreateProvider(
BEGIN_QCALL;
- pProvider = EventPipe::CreateProvider(providerID, pCallbackFunc, NULL);
+ pProvider = EventPipe::CreateProvider(providerName, pCallbackFunc, NULL);
END_QCALL;
@@ -595,4 +775,22 @@ void QCALLTYPE EventPipeInternal::WriteEvent(
END_QCALL;
}
+void QCALLTYPE EventPipeInternal::WriteEventData(
+ INT_PTR eventHandle,
+ unsigned int eventID,
+ EventData **pEventData,
+ unsigned int eventDataCount,
+ LPCGUID pActivityId,
+ LPCGUID pRelatedActivityId)
+{
+ QCALL_CONTRACT;
+ BEGIN_QCALL;
+
+ _ASSERTE(eventHandle != NULL);
+ EventPipeEvent *pEvent = reinterpret_cast<EventPipeEvent *>(eventHandle);
+ EventPipe::WriteEvent(*pEvent, pEventData, eventDataCount, pActivityId, pRelatedActivityId);
+
+ END_QCALL;
+}
+
#endif // FEATURE_PERFTRACING
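The EventData constructor above totals caller-supplied chunk sizes with S_UINT32 so an overflowing sum cannot produce an undersized buffer later; on overflow the payload is emptied rather than trusted. The same check in standalone form, using a 64-bit accumulator in place of the runtime's safemath types:

    #include <cstddef>
    #include <cstdint>

    struct Chunk { const uint8_t* Ptr; uint32_t Size; };

    // Returns false (and writes 0) when the total would not fit in 32 bits,
    // mirroring the "drop the data and create an empty payload" policy.
    static bool SumSizes(const Chunk* chunks, size_t count, uint32_t* total)
    {
        uint64_t sum = 0;
        for (size_t i = 0; i < count; ++i)
            sum += chunks[i].Size;   // 64-bit accumulator: no overflow for realistic counts
        if (sum > UINT32_MAX) { *total = 0; return false; }
        *total = static_cast<uint32_t>(sum);
        return true;
    }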
diff --git a/src/vm/eventpipe.h b/src/vm/eventpipe.h
index fa7d734280..bac7be6ac8 100644
--- a/src/vm/eventpipe.h
+++ b/src/vm/eventpipe.h
@@ -29,6 +29,69 @@ typedef void (*EventPipeCallback)(
void *FilterData,
void *CallbackContext);
+struct EventData
+{
+public:
+ unsigned long Ptr;
+ unsigned int Size;
+ unsigned int Reserved;
+};
+
+class EventPipeEventPayload
+{
+private:
+ BYTE *m_pData;
+ EventData **m_pEventData;
+ unsigned int m_eventDataCount;
+ unsigned int m_size;
+ bool m_allocatedData;
+
+ // If the data is stored only as an array of EventData objects, create a flat buffer and copy into it
+ void Flatten();
+
+public:
+ // Build this payload with a flat buffer inside
+ EventPipeEventPayload(BYTE *pData, unsigned int length);
+
+ // Build this payload to contain an array of EventData objects
+ EventPipeEventPayload(EventData **pEventData, unsigned int eventDataCount);
+
+ // If a buffer was allocated internally, delete it
+ ~EventPipeEventPayload();
+
+ // Copy the data (whether flat or array of objects) into a flat buffer at pDst
+    // Assumes that pDst points to an appropriately sized buffer
+ void CopyData(BYTE *pDst);
+
+ // Get the flat formatted data in this payload
+ // This method will allocate a buffer if it does not already contain flattened data
+ // This method will return NULL on OOM if a buffer needed to be allocated
+ BYTE* GetFlatData();
+
+    // Return true if the data is stored in a flat buffer
+ bool IsFlattened() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pData != NULL;
+ }
+
+    // The size of the buffer needed to contain the stored data
+ unsigned int GetSize() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_size;
+ }
+
+ EventData** GetEventDataArray() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_pEventData;
+ }
+};
+
class StackContents
{
private:
@@ -181,15 +244,19 @@ class EventPipe
static bool Enabled();
// Create a provider.
- static EventPipeProvider* CreateProvider(const GUID &providerID, EventPipeCallback pCallbackFunction = NULL, void *pCallbackData = NULL);
+ static EventPipeProvider* CreateProvider(const SString &providerName, EventPipeCallback pCallbackFunction = NULL, void *pCallbackData = NULL);
// Delete a provider.
static void DeleteProvider(EventPipeProvider *pProvider);
- // Write out an event.
+ // Write out an event from a flat buffer.
// Data is written as a serialized blob matching the ETW serialization conventions.
static void WriteEvent(EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId = NULL, LPCGUID pRelatedActivityId = NULL);
+ // Write out an event from an EventData array.
+ // Data is written as a serialized blob matching the ETW serialization conventions.
+ static void WriteEvent(EventPipeEvent &event, EventData **pEventData, unsigned int eventDataCount, LPCGUID pActivityId = NULL, LPCGUID pRelatedActivityId = NULL);
+
// Write out a sample profile event.
static void WriteSampleProfileEvent(Thread *pSamplingThread, EventPipeEvent *pEvent, Thread *pTargetThread, StackContents &stackContents, BYTE *pData = NULL, unsigned int length = 0);
@@ -199,6 +266,11 @@ class EventPipe
// Get the managed call stack for the specified thread.
static bool WalkManagedStackForThread(Thread *pThread, StackContents &stackContents);
+ protected:
+
+    // The counterpart to WriteEvent, called once the payload has been constructed
+ static void WriteEventInternal(EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId = NULL, LPCGUID pRelatedActivityId = NULL);
+
private:
// Callback function for the stack walker. For each frame walked, this callback is invoked.
@@ -286,7 +358,7 @@ public:
static void QCALLTYPE Disable();
static INT_PTR QCALLTYPE CreateProvider(
- GUID providerID,
+ __in_z LPCWSTR providerName,
EventPipeCallback pCallbackFunc);
static INT_PTR QCALLTYPE DefineEvent(
@@ -307,6 +379,13 @@ public:
void *pData,
unsigned int length,
LPCGUID pActivityId, LPCGUID pRelatedActivityId);
+
+ static void QCALLTYPE WriteEventData(
+ INT_PTR eventHandle,
+ unsigned int eventID,
+ EventData **pEventData,
+ unsigned int eventDataCount,
+ LPCGUID pActivityId, LPCGUID pRelatedActivityId);
};
#endif // FEATURE_PERFTRACING
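EventPipeEventPayload gives both write paths one interface over two representations: a flat buffer for the common case and a scatter list that is flattened lazily, only when a consumer needs contiguous bytes. A standalone model of the same idea, with std::vector standing in for the runtime's manual allocation:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct Chunk { const uint8_t* Ptr; uint32_t Size; };

    class Payload {
        const uint8_t* flat_ = nullptr;   // set when constructed flat (or after flattening)
        uint32_t size_ = 0;
        const Chunk* chunks_ = nullptr;   // set when constructed scattered
        size_t count_ = 0;
        std::vector<uint8_t> owned_;      // lazily built flat copy

    public:
        Payload(const uint8_t* data, uint32_t size) : flat_(data), size_(size) {}
        Payload(const Chunk* chunks, size_t count) : chunks_(chunks), count_(count) {
            for (size_t i = 0; i < count; ++i) size_ += chunks[i].Size; // overflow check elided
        }

        bool IsFlattened() const { return flat_ != nullptr; }
        uint32_t GetSize() const { return size_; }

        // Copy into a caller-provided buffer without flattening first.
        void CopyData(uint8_t* dst) const {
            if (IsFlattened()) { std::memcpy(dst, flat_, size_); return; }
            for (size_t i = 0; i < count_; ++i) {
                std::memcpy(dst, chunks_[i].Ptr, chunks_[i].Size);
                dst += chunks_[i].Size;
            }
        }

        // Flatten on first request; later calls reuse the same buffer.
        const uint8_t* GetFlatData() {
            if (!IsFlattened() && size_ > 0) {
                owned_.resize(size_);
                CopyData(owned_.data());
                flat_ = owned_.data();
            }
            return flat_;
        }
    };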
diff --git a/src/vm/eventpipebuffer.cpp b/src/vm/eventpipebuffer.cpp
index 00652c9fac..80b4a4f1b7 100644
--- a/src/vm/eventpipebuffer.cpp
+++ b/src/vm/eventpipebuffer.cpp
@@ -4,6 +4,7 @@
#include "common.h"
+#include "eventpipe.h"
#include "eventpipeeventinstance.h"
#include "eventpipebuffer.h"
@@ -46,7 +47,7 @@ EventPipeBuffer::~EventPipeBuffer()
}
}
-bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack)
+bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack)
{
CONTRACTL
{
@@ -58,7 +59,7 @@ bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *p
CONTRACTL_END;
// Calculate the size of the event.
- unsigned int eventSize = sizeof(EventPipeEventInstance) + dataLength;
+ unsigned int eventSize = sizeof(EventPipeEventInstance) + payload.GetSize();
// Make sure we have enough space to write the event.
if(m_pCurrent + eventSize >= m_pLimit)
@@ -77,7 +78,7 @@ bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *p
event,
pThread->GetOSThreadId(),
pDataDest,
- dataLength,
+ payload.GetSize(),
pActivityId,
pRelatedActivityId);
@@ -89,9 +90,9 @@ bool EventPipeBuffer::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *p
}
// Write the event payload data to the buffer.
- if(dataLength > 0)
+ if(payload.GetSize() > 0)
{
- memcpy(pDataDest, pData, dataLength);
+ payload.CopyData(pDataDest);
}
// Save the most recent event timestamp.
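EventPipeBuffer::WriteEvent is a bump-pointer write: reserve header plus payload, report failure when the buffer is full so the manager can hand out a fresh one, and let the payload copy itself (flat or scattered) into the reserved span. The core in a standalone sketch:

    #include <cstdint>
    #include <cstring>

    struct Buffer {
        uint8_t* current;
        uint8_t* limit;

        // Returns false when there is no room; the caller then allocates a new buffer.
        bool Write(const uint8_t* payload, uint32_t payloadSize) {
            const uint32_t headerSize = 32;      // stand-in for sizeof(EventPipeEventInstance)
            if (current + headerSize + payloadSize >= limit)
                return false;
            std::memset(current, 0, headerSize); // the real code constructs the instance in place
            if (payloadSize > 0)
                std::memcpy(current + headerSize, payload, payloadSize);
            current += headerSize + payloadSize;
            return true;
        }
    };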
diff --git a/src/vm/eventpipebuffer.h b/src/vm/eventpipebuffer.h
index f279a2865c..c96ad26609 100644
--- a/src/vm/eventpipebuffer.h
+++ b/src/vm/eventpipebuffer.h
@@ -7,6 +7,7 @@
#ifdef FEATURE_PERFTRACING
+#include "eventpipe.h"
#include "eventpipeevent.h"
#include "eventpipeeventinstance.h"
@@ -81,7 +82,7 @@ public:
// Returns:
// - true: The write succeeded.
// - false: The write failed. In this case, the buffer should be considered full.
- bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int dataLength, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack = NULL);
+ bool WriteEvent(Thread *pThread, EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId, LPCGUID pRelatedActivityId, StackContents *pStack = NULL);
// Get the timestamp of the most recent event in the buffer.
LARGE_INTEGER GetMostRecentTimeStamp() const;
diff --git a/src/vm/eventpipebuffermanager.cpp b/src/vm/eventpipebuffermanager.cpp
index 86a3e03c59..e7d97d5732 100644
--- a/src/vm/eventpipebuffermanager.cpp
+++ b/src/vm/eventpipebuffermanager.cpp
@@ -3,6 +3,7 @@
// See the LICENSE file in the project root for more information.
#include "common.h"
+#include "eventpipe.h"
#include "eventpipeconfiguration.h"
#include "eventpipebuffer.h"
#include "eventpipebuffermanager.h"
@@ -32,6 +33,49 @@ EventPipeBufferManager::EventPipeBufferManager()
#endif // _DEBUG
}
+EventPipeBufferManager::~EventPipeBufferManager()
+{
+ CONTRACTL
+ {
+ THROWS;
+ GC_TRIGGERS;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ if(m_pPerThreadBufferList != NULL)
+ {
+ SListElem<EventPipeBufferList*> *pElem = m_pPerThreadBufferList->GetHead();
+ while(pElem != NULL)
+ {
+ SListElem<EventPipeBufferList*> *pCurElem = pElem;
+
+ EventPipeBufferList *pThreadBufferList = pCurElem->GetValue();
+ if (!pThreadBufferList->OwnedByThread())
+ {
+ Thread *pThread = NULL;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ if (pThread->GetEventPipeBufferList() == pThreadBufferList)
+ {
+ pThread->SetEventPipeBufferList(NULL);
+ break;
+ }
+ }
+
+            // We don't delete the buffers themselves because they can still be in use
+ delete(pThreadBufferList);
+ }
+
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ delete(pCurElem);
+ }
+
+ delete(m_pPerThreadBufferList);
+ m_pPerThreadBufferList = NULL;
+ }
+}
+
EventPipeBuffer* EventPipeBufferManager::AllocateBufferForThread(Thread *pThread, unsigned int requestSize)
{
CONTRACTL
@@ -217,7 +261,7 @@ void EventPipeBufferManager::DeAllocateBuffer(EventPipeBuffer *pBuffer)
}
}
-bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread, StackContents *pStack)
+bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread, StackContents *pStack)
{
CONTRACTL
{
@@ -276,7 +320,7 @@ bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event,
else
{
// Attempt to write the event to the buffer. If this fails, we should allocate a new buffer.
- allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pActivityId, pRelatedActivityId, pStack);
+ allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, payload, pActivityId, pRelatedActivityId, pStack);
}
}
@@ -290,7 +334,7 @@ bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event,
// However, the GC is waiting on this call to return so that it can make forward progress. Thus it is not safe
// to switch to preemptive mode here.
- unsigned int requestSize = sizeof(EventPipeEventInstance) + length;
+ unsigned int requestSize = sizeof(EventPipeEventInstance) + payload.GetSize();
pBuffer = AllocateBufferForThread(pThread, requestSize);
}
@@ -299,7 +343,7 @@ bool EventPipeBufferManager::WriteEvent(Thread *pThread, EventPipeEvent &event,
// This is the second time if this thread did have one or more buffers, but they were full.
if(allocNewBuffer && pBuffer != NULL)
{
- allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, pData, length, pActivityId, pRelatedActivityId, pStack);
+ allocNewBuffer = !pBuffer->WriteEvent(pEventThread, event, payload, pActivityId, pRelatedActivityId, pStack);
}
// Mark that the thread is no longer writing an event.
@@ -435,8 +479,15 @@ void EventPipeBufferManager::DeAllocateBuffers()
// In DEBUG, make sure that the element was found and removed.
_ASSERTE(pElem != NULL);
+
+ SListElem<EventPipeBufferList*> *pCurElem = pElem;
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ delete(pCurElem);
+ }
+ else
+ {
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
}
- pElem = m_pPerThreadBufferList->GetNext(pElem);
}
// Remove the list reference from the thread.
@@ -482,12 +533,18 @@ void EventPipeBufferManager::DeAllocateBuffers()
pElem = m_pPerThreadBufferList->FindAndRemove(pElem);
_ASSERTE(pElem != NULL);
+ SListElem<EventPipeBufferList*> *pCurElem = pElem;
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ delete(pCurElem);
+
// Now that all of the list elements have been freed, free the list itself.
delete(pBufferList);
pBufferList = NULL;
}
-
- pElem = m_pPerThreadBufferList->GetNext(pElem);
+ else
+ {
+ pElem = m_pPerThreadBufferList->GetNext(pElem);
+ }
}
}
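Every cleanup loop added in this file follows the same discipline: capture the next link before deleting the current SListElem, so iteration never reads freed memory. The core of the pattern in isolation:

    struct Node { Node* next; };

    static void FreeList(Node* head)
    {
        while (head != nullptr) {
            Node* cur = head;
            head = head->next;   // advance first...
            delete cur;          // ...then free, never touching a deleted node
        }
    }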
diff --git a/src/vm/eventpipebuffermanager.h b/src/vm/eventpipebuffermanager.h
index a53721b7b8..942d4e2242 100644
--- a/src/vm/eventpipebuffermanager.h
+++ b/src/vm/eventpipebuffermanager.h
@@ -7,6 +7,7 @@
#ifdef FEATURE_PERFTRACING
+#include "eventpipe.h"
#include "eventpipefile.h"
#include "eventpipebuffer.h"
#include "spinlock.h"
@@ -61,13 +62,14 @@ private:
public:
EventPipeBufferManager();
+ ~EventPipeBufferManager();
// Write an event to the input thread's current event buffer.
// An optional eventThread can be provided for sample profiler events.
// This is because the thread that writes the events is not the same as the "event thread".
// An optional stack trace can be provided for sample profiler events.
// Otherwise, if a stack trace is needed, one will be automatically collected.
- bool WriteEvent(Thread *pThread, EventPipeEvent &event, BYTE *pData, unsigned int length, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread = NULL, StackContents *pStack = NULL);
+ bool WriteEvent(Thread *pThread, EventPipeEvent &event, EventPipeEventPayload &payload, LPCGUID pActivityId, LPCGUID pRelatedActivityId, Thread *pEventThread = NULL, StackContents *pStack = NULL);
// Write the contents of the managed buffers to the specified file.
// The stopTimeStamp is used to determine when tracing was stopped to ensure that we
diff --git a/src/vm/eventpipeconfiguration.cpp b/src/vm/eventpipeconfiguration.cpp
index 0a266e4849..80c4878782 100644
--- a/src/vm/eventpipeconfiguration.cpp
+++ b/src/vm/eventpipeconfiguration.cpp
@@ -10,9 +10,7 @@
#ifdef FEATURE_PERFTRACING
-// {5291C09C-2660-4D6A-83A3-C383FD020DEC}
-const GUID EventPipeConfiguration::s_configurationProviderID =
- { 0x5291c09c, 0x2660, 0x4d6a, { 0x83, 0xa3, 0xc3, 0x83, 0xfd, 0x2, 0xd, 0xec } };
+const WCHAR* EventPipeConfiguration::s_configurationProviderName = W("Microsoft-DotNETCore-EventPipeConfiguration");
EventPipeConfiguration::EventPipeConfiguration()
{
@@ -35,6 +33,12 @@ EventPipeConfiguration::~EventPipeConfiguration()
}
CONTRACTL_END;
+ if(m_pConfigProvider != NULL)
+ {
+ delete(m_pConfigProvider);
+ m_pConfigProvider = NULL;
+ }
+
if(m_pEnabledProviderList != NULL)
{
delete(m_pEnabledProviderList);
@@ -43,6 +47,15 @@ EventPipeConfiguration::~EventPipeConfiguration()
if(m_pProviderList != NULL)
{
+ SListElem<EventPipeProvider*> *pElem = m_pProviderList->GetHead();
+ while(pElem != NULL)
+ {
+            // We don't delete the provider itself because it can still be in use
+ SListElem<EventPipeProvider*> *pCurElem = pElem;
+ pElem = m_pProviderList->GetNext(pElem);
+ delete(pCurElem);
+ }
+
delete(m_pProviderList);
m_pProviderList = NULL;
}
@@ -59,7 +72,7 @@ void EventPipeConfiguration::Initialize()
CONTRACTL_END;
// Create the configuration provider.
- m_pConfigProvider = EventPipe::CreateProvider(s_configurationProviderID);
+ m_pConfigProvider = EventPipe::CreateProvider(SL(s_configurationProviderName));
// Create the metadata event.
m_pMetadataEvent = m_pConfigProvider->AddEvent(
@@ -84,7 +97,7 @@ bool EventPipeConfiguration::RegisterProvider(EventPipeProvider &provider)
CrstHolder _crst(EventPipe::GetLock());
// See if we've already registered this provider.
- EventPipeProvider *pExistingProvider = GetProviderNoLock(provider.GetProviderID());
+ EventPipeProvider *pExistingProvider = GetProviderNoLock(provider.GetProviderName());
if(pExistingProvider != NULL)
{
return false;
@@ -139,6 +152,7 @@ bool EventPipeConfiguration::UnregisterProvider(EventPipeProvider &provider)
{
if(m_pProviderList->FindAndRemove(pElem) != NULL)
{
+ delete(pElem);
return true;
}
}
@@ -146,7 +160,7 @@ bool EventPipeConfiguration::UnregisterProvider(EventPipeProvider &provider)
return false;
}
-EventPipeProvider* EventPipeConfiguration::GetProvider(const GUID &providerID)
+EventPipeProvider* EventPipeConfiguration::GetProvider(const SString &providerName)
{
CONTRACTL
{
@@ -160,10 +174,10 @@ EventPipeProvider* EventPipeConfiguration::GetProvider(const GUID &providerID)
// modify the list.
CrstHolder _crst(EventPipe::GetLock());
- return GetProviderNoLock(providerID);
+ return GetProviderNoLock(providerName);
}
-EventPipeProvider* EventPipeConfiguration::GetProviderNoLock(const GUID &providerID)
+EventPipeProvider* EventPipeConfiguration::GetProviderNoLock(const SString &providerName)
{
CONTRACTL
{
@@ -178,7 +192,7 @@ EventPipeProvider* EventPipeConfiguration::GetProviderNoLock(const GUID &provide
while(pElem != NULL)
{
EventPipeProvider *pProvider = pElem->GetValue();
- if(pProvider->GetProviderID() == providerID)
+ if(pProvider->GetProviderName().Equals(providerName))
{
return pProvider;
}
@@ -305,8 +319,8 @@ void EventPipeConfiguration::EnableRundown()
_ASSERTE(m_pEnabledProviderList == NULL);
const unsigned int numRundownProviders = 2;
EventPipeProviderConfiguration rundownProviders[numRundownProviders];
- rundownProviders[0] = EventPipeProviderConfiguration(W("e13c0d23-ccbc-4e12-931b-d9cc2eee27e4"), 0x80020138, static_cast<unsigned int>(EventPipeEventLevel::Verbose)); // Public provider.
- rundownProviders[1] = EventPipeProviderConfiguration(W("a669021c-c450-4609-a035-5af59af4df18"), 0x80020138, static_cast<unsigned int>(EventPipeEventLevel::Verbose)); // Rundown provider.
+ rundownProviders[0] = EventPipeProviderConfiguration(W("Microsoft-Windows-DotNETRuntime"), 0x80020138, static_cast<unsigned int>(EventPipeEventLevel::Verbose)); // Public provider.
+ rundownProviders[1] = EventPipeProviderConfiguration(W("Microsoft-Windows-DotNETRuntimeRundown"), 0x80020138, static_cast<unsigned int>(EventPipeEventLevel::Verbose)); // Rundown provider.
// Enable rundown.
m_rundownEnabled = true;
@@ -333,12 +347,13 @@ EventPipeEventInstance* EventPipeConfiguration::BuildEventMetadataEvent(EventPip
// Calculate the size of the event.
EventPipeEvent &sourceEvent = *sourceInstance.GetEvent();
- const GUID &providerID = sourceEvent.GetProvider()->GetProviderID();
+ const SString &providerName = sourceEvent.GetProvider()->GetProviderName();
unsigned int eventID = sourceEvent.GetEventID();
unsigned int eventVersion = sourceEvent.GetEventVersion();
BYTE *pPayloadData = sourceEvent.GetMetadata();
unsigned int payloadLength = sourceEvent.GetMetadataLength();
- unsigned int instancePayloadSize = sizeof(providerID) + sizeof(eventID) + sizeof(eventVersion) + sizeof(payloadLength) + payloadLength;
+ unsigned int providerNameLength = (providerName.GetCount() + 1) * sizeof(WCHAR);
+ unsigned int instancePayloadSize = providerNameLength + sizeof(eventID) + sizeof(eventVersion) + sizeof(payloadLength) + payloadLength;
// Allocate the payload.
BYTE *pInstancePayload = new BYTE[instancePayloadSize];
@@ -347,10 +362,10 @@ EventPipeEventInstance* EventPipeConfiguration::BuildEventMetadataEvent(EventPip
BYTE *currentPtr = pInstancePayload;
-    // Write the provider ID.
-    memcpy(currentPtr, (BYTE*)&providerID, sizeof(providerID));
-    currentPtr += sizeof(providerID);
+    // Write the provider name as null-terminated unicode.
+    memcpy(currentPtr, (BYTE*)providerName.GetUnicode(), providerNameLength);
+    currentPtr += providerNameLength;
    // Write the event ID.
    memcpy(currentPtr, &eventID, sizeof(eventID));
currentPtr += sizeof(eventID);
@@ -402,9 +417,14 @@ void EventPipeConfiguration::DeleteDeferredProviders()
{
// The act of deleting the provider unregisters it and removes it from the list.
delete(pProvider);
+ SListElem<EventPipeProvider*> *pCurElem = pElem;
+ pElem = m_pProviderList->GetNext(pElem);
+ delete(pCurElem);
+ }
+ else
+ {
+ pElem = m_pProviderList->GetNext(pElem);
}
-
- pElem = m_pProviderList->GetNext(pElem);
}
}
@@ -494,16 +514,7 @@ EventPipeEnabledProvider* EventPipeEnabledProviderList::GetEnabledProvider(
return NULL;
}
- // TEMPORARY: Convert the provider GUID to a string.
- const unsigned int guidSize = 39;
- WCHAR wszProviderID[guidSize];
- if(!StringFromGUID2(pProvider->GetProviderID(), wszProviderID, guidSize))
- {
- wszProviderID[0] = '\0';
- }
-
- // Strip off the {}.
- SString providerNameStr(&wszProviderID[1], guidSize-3);
+ SString providerNameStr = pProvider->GetProviderName();
LPCWSTR providerName = providerNameStr.GetUnicode();
EventPipeEnabledProvider *pEnabledProvider = NULL;
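With providers keyed by name, the metadata event's header now begins with a null-terminated UTF-16 provider name instead of a fixed 16-byte GUID, followed by the fixed-width event ID, event version, and payload length (the metadata payload bytes follow and are omitted here). A standalone sketch of the writer side of that layout:

    #include <cstdint>
    #include <cstring>
    #include <string>
    #include <vector>

    static std::vector<uint8_t> BuildMetadataHeader(const char16_t* providerName,
                                                    uint32_t eventID,
                                                    uint32_t eventVersion,
                                                    uint32_t payloadLength)
    {
        // Length includes the terminating NUL, matching (GetCount() + 1) * sizeof(WCHAR).
        size_t nameBytes =
            (std::char_traits<char16_t>::length(providerName) + 1) * sizeof(char16_t);
        std::vector<uint8_t> blob(nameBytes + 3 * sizeof(uint32_t));
        uint8_t* p = blob.data();
        std::memcpy(p, providerName, nameBytes);             p += nameBytes;
        std::memcpy(p, &eventID, sizeof(eventID));           p += sizeof(eventID);
        std::memcpy(p, &eventVersion, sizeof(eventVersion)); p += sizeof(eventVersion);
        std::memcpy(p, &payloadLength, sizeof(payloadLength));
        return blob;
    }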
diff --git a/src/vm/eventpipeconfiguration.h b/src/vm/eventpipeconfiguration.h
index aac9bd6065..1d161367b2 100644
--- a/src/vm/eventpipeconfiguration.h
+++ b/src/vm/eventpipeconfiguration.h
@@ -42,7 +42,7 @@ public:
bool UnregisterProvider(EventPipeProvider &provider);
// Get the provider with the specified provider ID if it exists.
- EventPipeProvider* GetProvider(const GUID &providerID);
+ EventPipeProvider* GetProvider(const SString &providerID);
// Get the configured size of the circular buffer.
size_t GetCircularBufferSize() const;
@@ -77,7 +77,7 @@ public:
private:
// Get the provider without taking the lock.
- EventPipeProvider* GetProviderNoLock(const GUID &providerID);
+ EventPipeProvider* GetProviderNoLock(const SString &providerID);
// Determines whether or not the event pipe is enabled.
Volatile<bool> m_enabled;
@@ -98,9 +98,9 @@ private:
// The event used to write event information to the event stream.
EventPipeEvent *m_pMetadataEvent;
- // The provider ID for the configuration event pipe provider.
+ // The provider name for the configuration event pipe provider.
// This provider is used to emit configuration events.
- static const GUID s_configurationProviderID;
+ const static WCHAR* s_configurationProviderName;
// True if rundown is enabled.
Volatile<bool> m_rundownEnabled;
diff --git a/src/vm/eventpipeeventinstance.cpp b/src/vm/eventpipeeventinstance.cpp
index afde2c0547..305b6dac04 100644
--- a/src/vm/eventpipeeventinstance.cpp
+++ b/src/vm/eventpipeeventinstance.cpp
@@ -182,19 +182,11 @@ void EventPipeEventInstance::SerializeToJsonFile(EventPipeJsonFile *pFile)
EX_TRY
{
- const unsigned int guidSize = 39;
- WCHAR wszProviderID[guidSize];
- if(!StringFromGUID2(m_pEvent->GetProvider()->GetProviderID(), wszProviderID, guidSize))
- {
- wszProviderID[0] = '\0';
- }
-
- // Strip off the {}.
StackScratchBuffer scratch;
- SString guidStr(&wszProviderID[1], guidSize-3);
+ SString providerName = m_pEvent->GetProvider()->GetProviderName();
SString message;
- message.Printf("Provider=%s/EventID=%d/Version=%d", guidStr.GetANSI(scratch), m_pEvent->GetEventID(), m_pEvent->GetEventVersion());
+ message.Printf("Provider=%s/EventID=%d/Version=%d", providerName.GetANSI(scratch), m_pEvent->GetEventID(), m_pEvent->GetEventVersion());
pFile->WriteEvent(m_timeStamp, m_threadID, message, m_stackContents);
}
EX_CATCH{} EX_END_CATCH(SwallowAllExceptions);
diff --git a/src/vm/eventpipefile.cpp b/src/vm/eventpipefile.cpp
index f574814586..26e04480ee 100644
--- a/src/vm/eventpipefile.cpp
+++ b/src/vm/eventpipefile.cpp
@@ -25,6 +25,9 @@ EventPipeFile::EventPipeFile(
}
CONTRACTL_END;
+ SetObjectVersion(2);
+ SetMinReaderVersion(0);
+
m_pSerializer = new FastSerializer(outputFilePath, *this);
m_serializationLock.Init(LOCK_TYPE_DEFAULT);
m_pMetadataLabels = new MapSHashWithRemove<EventPipeEvent*, StreamLabel>();
diff --git a/src/vm/eventpipeprovider.cpp b/src/vm/eventpipeprovider.cpp
index 896f9b2650..7361541e77 100644
--- a/src/vm/eventpipeprovider.cpp
+++ b/src/vm/eventpipeprovider.cpp
@@ -7,10 +7,11 @@
#include "eventpipeconfiguration.h"
#include "eventpipeevent.h"
#include "eventpipeprovider.h"
+#include "sha1.h"
#ifdef FEATURE_PERFTRACING
-EventPipeProvider::EventPipeProvider(const GUID &providerID, EventPipeCallback pCallbackFunction, void *pCallbackData)
+EventPipeProvider::EventPipeProvider(const SString &providerName, EventPipeCallback pCallbackFunction, void *pCallbackData)
{
CONTRACTL
{
@@ -20,7 +21,7 @@ EventPipeProvider::EventPipeProvider(const GUID &providerID, EventPipeCallback p
}
CONTRACTL_END;
- m_providerID = providerID;
+ m_providerName = providerName;
m_enabled = false;
m_keywords = 0;
m_providerLevel = EventPipeEventLevel::Critical;
@@ -65,7 +66,9 @@ EventPipeProvider::~EventPipeProvider()
EventPipeEvent *pEvent = pElem->GetValue();
delete pEvent;
+ SListElem<EventPipeEvent*> *pCurElem = pElem;
pElem = m_pEventList->GetNext(pElem);
+ delete pCurElem;
}
delete m_pEventList;
@@ -73,11 +76,11 @@ EventPipeProvider::~EventPipeProvider()
}
}
-const GUID& EventPipeProvider::GetProviderID() const
+const SString& EventPipeProvider::GetProviderName() const
{
LIMITED_METHOD_CONTRACT;
- return m_providerID;
+ return m_providerName;
}
bool EventPipeProvider::Enabled() const
@@ -198,7 +201,7 @@ void EventPipeProvider::InvokeCallback()
if(m_pCallbackFunction != NULL && !g_fEEShutDown)
{
(*m_pCallbackFunction)(
- &m_providerID,
+ NULL, /* providerId */
m_enabled,
(UCHAR) m_providerLevel,
m_keywords,
diff --git a/src/vm/eventpipeprovider.h b/src/vm/eventpipeprovider.h
index d2c459ef32..7b92faca72 100644
--- a/src/vm/eventpipeprovider.h
+++ b/src/vm/eventpipeprovider.h
@@ -34,6 +34,9 @@ private:
// The GUID of the provider.
GUID m_providerID;
+ // The name of the provider.
+ SString m_providerName;
+
// True if the provider is enabled.
bool m_enabled;
@@ -61,14 +64,14 @@ private:
bool m_deleteDeferred;
// Private constructor because all providers are created through EventPipe::CreateProvider.
- EventPipeProvider(const GUID &providerID, EventPipeCallback pCallbackFunction = NULL, void *pCallbackData = NULL);
+ EventPipeProvider(const SString &providerName, EventPipeCallback pCallbackFunction = NULL, void *pCallbackData = NULL);
public:
~EventPipeProvider();
- // Get the provider ID.
- const GUID& GetProviderID() const;
+ // Get the provider Name.
+ const SString& GetProviderName() const;
// Determine if the provider is enabled.
bool Enabled() const;
diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp
index 16f729d505..6325edb462 100644
--- a/src/vm/eventtrace.cpp
+++ b/src/vm/eventtrace.cpp
@@ -6834,7 +6834,7 @@ VOID ETW::MethodLog::SendEventsForJitMethodsHelper(BaseDomain *pDomainFilter,
// manager locks.
// see code:#TableLockHolder
ReJITID rejitID =
- fGetReJitIDs ? pMD->GetReJitManager()->GetReJitIdNoLock(pMD, codeStart) : 0;
+ fGetReJitIDs ? ReJitManager::GetReJitIdNoLock(pMD, codeStart) : 0;
// There are small windows of time where the heap iterator may come across a
// codeStart that is not yet published to the MethodDesc. This may happen if
@@ -6962,8 +6962,8 @@ VOID ETW::MethodLog::SendEventsForJitMethods(BaseDomain *pDomainFilter, LoaderAl
// We only support getting rejit IDs when filtering by domain.
if (pDomainFilter)
{
- ReJitManager::TableLockHolder lkRejitMgrSharedDomain(SharedDomain::GetDomain()->GetReJitManager());
- ReJitManager::TableLockHolder lkRejitMgrModule(pDomainFilter->GetReJitManager());
+ CodeVersionManager::TableLockHolder lkRejitMgrSharedDomain(SharedDomain::GetDomain()->GetCodeVersionManager());
+ CodeVersionManager::TableLockHolder lkRejitMgrModule(pDomainFilter->GetCodeVersionManager());
SendEventsForJitMethodsHelper(pDomainFilter,
pLoaderAllocatorFilter,
dwEventOptions,
diff --git a/src/vm/exceptionhandling.cpp b/src/vm/exceptionhandling.cpp
index c6d42eddd7..a52ccd7c2a 100644
--- a/src/vm/exceptionhandling.cpp
+++ b/src/vm/exceptionhandling.cpp
@@ -5186,6 +5186,38 @@ BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD e
IsIPInMarkedJitHelper(controlPc));
}
+#ifdef _TARGET_ARM_
+static inline BOOL HandleArmSingleStep(PCONTEXT pContext, PEXCEPTION_RECORD pExceptionRecord, Thread *pThread)
+{
+#ifdef __linux__
+    // On ARM Linux the exception address points at the break instruction,
+    // but the rest of the code expects it to point at the instruction after the break
+ if (pExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT)
+ {
+ SetIP(pContext, GetIP(pContext) + CORDbg_BREAK_INSTRUCTION_SIZE);
+ pExceptionRecord->ExceptionAddress = (void *)GetIP(pContext);
+ }
+#endif
+ // On ARM we don't have any reliable hardware support for single stepping so it is emulated in software.
+ // The implementation will end up throwing an EXCEPTION_BREAKPOINT rather than an EXCEPTION_SINGLE_STEP
+ // and leaves other aspects of the thread context in an invalid state. Therefore we use this opportunity
+ // to fixup the state before any other part of the system uses it (we do it here since only the debugger
+ // uses single step functionality).
+
+    // First ask the emulation itself whether this exception occurred while single stepping was enabled. If so,
+    // it will fix up the context to be consistent again and return true. If it does, and the exception was
+    // EXCEPTION_BREAKPOINT, we translate it to EXCEPTION_SINGLE_STEP (otherwise we leave it be, e.g. the
+    // stepped instruction caused an access violation).
+ if (pThread->HandleSingleStep(pContext, pExceptionRecord->ExceptionCode) && (pExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT))
+ {
+ pExceptionRecord->ExceptionCode = EXCEPTION_SINGLE_STEP;
+ pExceptionRecord->ExceptionAddress = (void *)GetIP(pContext);
+ return TRUE;
+ }
+ return FALSE;
+}
+#endif // _TARGET_ARM_
+
BOOL HandleHardwareException(PAL_SEHException* ex)
{
_ASSERTE(IsSafeToHandleHardwareException(ex->GetContextRecord(), ex->GetExceptionRecord()));
@@ -5249,6 +5281,9 @@ BOOL HandleHardwareException(PAL_SEHException* ex)
Thread *pThread = GetThread();
if (pThread != NULL && g_pDebugInterface != NULL)
{
+#ifdef _TARGET_ARM_
+ HandleArmSingleStep(ex->GetContextRecord(), ex->GetExceptionRecord(), pThread);
+#endif
if (ex->GetExceptionRecord()->ExceptionCode == STATUS_BREAKPOINT)
{
// If this is breakpoint context, it is set up to point to an instruction after the break instruction.
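HandleArmSingleStep repairs the state the software stepper leaves behind: on Linux the reported address is first advanced past the break instruction, and if the stepper owned the breakpoint the exception code is rewritten to a single-step. A minimal standalone model (constants illustrative; the real values come from the PAL headers):

    #include <cstdint>

    constexpr uint32_t EXC_BREAKPOINT  = 0x80000003;  // illustrative
    constexpr uint32_t EXC_SINGLE_STEP = 0x80000004;  // illustrative
    constexpr uint32_t BREAK_INSN_SIZE = 4;           // assumed for this sketch

    struct ExcRecord  { uint32_t code; uintptr_t address; };
    struct CpuContext { uintptr_t pc; };

    static bool TranslateStep(ExcRecord& rec, CpuContext& ctx, bool stepperOwnedBreak)
    {
        if (rec.code == EXC_BREAKPOINT) {
            ctx.pc += BREAK_INSN_SIZE;   // Linux reports the break itself; skip past it
            rec.address = ctx.pc;
        }
        if (stepperOwnedBreak && rec.code == EXC_BREAKPOINT) {
            rec.code = EXC_SINGLE_STEP;  // report a step, not a breakpoint
            return true;
        }
        return false;
    }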
diff --git a/src/vm/fastserializableobject.h b/src/vm/fastserializableobject.h
index cbfcfc9f0e..ec162e3cc3 100644
--- a/src/vm/fastserializableobject.h
+++ b/src/vm/fastserializableobject.h
@@ -25,6 +25,41 @@ public:
// Get the type name for the current object.
virtual const char* GetTypeName() = 0;
+
+ int GetObjectVersion() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_objectVersion;
+ }
+
+ int GetMinReaderVersion() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return m_minReaderVersion;
+ }
+
+protected:
+
+ void SetObjectVersion(int version)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_objectVersion = version;
+ }
+
+ void SetMinReaderVersion(int version)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ m_minReaderVersion = version;
+ }
+
+private:
+
+ int m_objectVersion = 1;
+ int m_minReaderVersion = 0;
};
#endif // FEATURE_PERFTRACING
diff --git a/src/vm/fastserializer.cpp b/src/vm/fastserializer.cpp
index 7f9b4e20a6..8e0e0ad768 100644
--- a/src/vm/fastserializer.cpp
+++ b/src/vm/fastserializer.cpp
@@ -226,8 +226,8 @@ void FastSerializer::WriteSerializationType(FastSerializableObject *pObject)
// Write the SerializationType version fields.
int serializationType[2];
- serializationType[0] = 1; // Object Version.
- serializationType[1] = 0; // Minimum Reader Version.
+ serializationType[0] = pObject->GetObjectVersion();
+ serializationType[1] = pObject->GetMinReaderVersion();
WriteBuffer((BYTE*) &serializationType, sizeof(serializationType));
// Write the SerializationType TypeName field.
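Moving the version numbers from FastSerializer constants into per-object fields lets a single type bump its format (EventPipeFile's SetObjectVersion(2) above) without touching the serializer. A compact standalone model:

    #include <cstdio>

    class Serializable {
        int m_objectVersion = 1;      // defaults match the old hard-coded values
        int m_minReaderVersion = 0;
    protected:
        void SetObjectVersion(int v)    { m_objectVersion = v; }
        void SetMinReaderVersion(int v) { m_minReaderVersion = v; }
    public:
        int GetObjectVersion() const    { return m_objectVersion; }
        int GetMinReaderVersion() const { return m_minReaderVersion; }
    };

    class EventFile : public Serializable {
    public:
        EventFile() { SetObjectVersion(2); SetMinReaderVersion(0); } // format bump
    };

    static void WriteSerializationType(FILE* out, const Serializable& obj)
    {
        int versions[2] = { obj.GetObjectVersion(), obj.GetMinReaderVersion() };
        std::fwrite(versions, sizeof(versions[0]), 2, out);
    }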
diff --git a/src/vm/finalizerthread.cpp b/src/vm/finalizerthread.cpp
index 0a4da165a1..3ba3468407 100644
--- a/src/vm/finalizerthread.cpp
+++ b/src/vm/finalizerthread.cpp
@@ -909,7 +909,7 @@ void FinalizerThread::FinalizerThreadCreate()
// actual thread terminates.
GetFinalizerThread()->IncExternalCount();
- if (GetFinalizerThread()->CreateNewThread(0, &FinalizerThreadStart, NULL))
+    if (GetFinalizerThread()->CreateNewThread(0, &FinalizerThreadStart, NULL, W("Finalizer")))
{
DWORD dwRet = GetFinalizerThread()->StartThread();
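CreateNewThread now takes a display name so runtime threads appear labeled in debuggers and profilers. A standalone equivalent using std::thread; pthread_setname_np is a Linux/glibc extension (an assumption here), and names longer than 15 characters are rejected on Linux:

    #include <pthread.h>   // pthread_setname_np: Linux/glibc extension
    #include <thread>

    int main()
    {
        std::thread worker([] { /* finalizer-like work */ });
        pthread_setname_np(worker.native_handle(), "Finalizer"); // <= 15 chars on Linux
        worker.join();
        return 0;
    }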
diff --git a/src/vm/frames.cpp b/src/vm/frames.cpp
index 86bb97b8c7..d38762b87e 100644
--- a/src/vm/frames.cpp
+++ b/src/vm/frames.cpp
@@ -13,7 +13,6 @@
#include "method.hpp"
#include "class.h"
#include "excep.h"
-#include "security.h"
#include "stublink.h"
#include "fieldmarshaler.h"
#include "siginfo.hpp"
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
index 3e195796b4..895c176460 100644
--- a/src/vm/gccover.cpp
+++ b/src/vm/gccover.cpp
@@ -160,7 +160,7 @@ void SetupGcCoverage(MethodDesc* pMD, BYTE* methodStartPtr) {
{
BaseDomain* pDomain = pMD->GetDomain();
// Enter the global lock which protects the list of all functions being JITd
- ListLockHolder pJitLock(pDomain->GetJitLock());
+ JitListLock::LockHolder pJitLock(pDomain->GetJitLock());
// It is possible that another thread stepped in before we entered the global lock for the first time.
@@ -175,14 +175,14 @@ void SetupGcCoverage(MethodDesc* pMD, BYTE* methodStartPtr) {
#ifdef _DEBUG
description = pMD->m_pszDebugMethodName;
#endif
- ListLockEntryHolder pEntry(ListLockEntry::Find(pJitLock, pMD, description));
+ ReleaseHolder<JitListLockEntry> pEntry(JitListLockEntry::Find(pJitLock, pMD->GetInitialCodeVersion(), description));
// We have an entry now, we can release the global lock
pJitLock.Release();
// Take the entry lock
{
- ListLockEntryLockHolder pEntryLock(pEntry, FALSE);
+ JitListLockEntry::LockHolder pEntryLock(pEntry, FALSE);
if (pEntryLock.DeadlockAwareAcquire())
{
@@ -1035,6 +1035,10 @@ static SLOT getTargetOfCall(SLOT instrPtr, PCONTEXT regs, SLOT*nextInstr) {
unsigned int regnum = (instrPtr[0] & 0x78) >> 3;
return (BYTE *)getRegVal(regnum, regs);
}
+ else
+ {
+ return 0; // Not a call.
+ }
#elif defined(_TARGET_ARM64_)
if (((*reinterpret_cast<DWORD*>(instrPtr)) & 0xFC000000) == 0x94000000)
{
diff --git a/src/vm/gcenv.ee.cpp b/src/vm/gcenv.ee.cpp
index 6fe9a71def..b135173c0f 100644
--- a/src/vm/gcenv.ee.cpp
+++ b/src/vm/gcenv.ee.cpp
@@ -445,7 +445,7 @@ Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threa
return NULL;
}
- if (threadStubArgs.thread->CreateNewThread(0, (LPTHREAD_START_ROUTINE)BackgroundThreadStub, &threadStubArgs))
+ if (threadStubArgs.thread->CreateNewThread(0, (LPTHREAD_START_ROUTINE)BackgroundThreadStub, &threadStubArgs, W("Background GC")))
{
threadStubArgs.thread->SetBackground (TRUE, FALSE);
threadStubArgs.thread->StartThread();
@@ -861,7 +861,7 @@ void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
#endif
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
- if (args->write_watch_table != nullptr)
+ if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr))
{
assert(args->is_runtime_suspended);
g_sw_ww_table = args->write_watch_table;
@@ -888,6 +888,17 @@ void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
g_lowest_address = args->lowest_address;
VolatileStore(&g_highest_address, args->highest_address);
+
+#if defined(_ARM64_)
+    // Need to update the write barrier again for the changes to g_lowest_address and g_highest_address
+ ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);
+
+ if(!args->is_runtime_suspended)
+ {
+ // If runtime is not suspended, force updated state to be visible to all threads
+ MemoryBarrier();
+ }
+#endif
return;
case WriteBarrierOp::StompEphemeral:
// StompEphemeral requires a new ephemeral low and a new ephemeral high
@@ -945,6 +956,7 @@ void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
case WriteBarrierOp::SwitchToNonWriteWatch:
#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
assert(args->is_runtime_suspended && "the runtime must be suspended here!");
+ g_sw_ww_table = 0;
g_sw_ww_enabled_for_gc_heap = false;
::SwitchToNonWriteWatchBarrier(true);
#else
@@ -1031,7 +1043,7 @@ bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value)
if (strcmp(key, "gcConcurrent") == 0)
{
- *value = g_IGCconcurrent != 0;
+ *value = !!g_pConfig->GetGCconcurrent();
return true;
}
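The ARM64 block above re-stomps the write barrier after g_lowest_address and g_highest_address change and, when the runtime is not suspended, issues a full memory barrier so racing mutator threads observe the updated bounds. The publication pattern in standalone form, with std::atomic standing in for VolatileStore and MemoryBarrier:

    #include <atomic>
    #include <cstdint>

    static std::atomic<uintptr_t> g_lowest{0};
    static std::atomic<uintptr_t> g_highest{0};

    static void PublishHeapBounds(uintptr_t lo, uintptr_t hi, bool runtimeSuspended)
    {
        g_lowest.store(lo, std::memory_order_relaxed);
        g_highest.store(hi, std::memory_order_release);
        if (!runtimeSuspended)
        {
            // Make both stores visible to all threads before continuing;
            // ARM64's weak memory model does not guarantee this otherwise.
            std::atomic_thread_fence(std::memory_order_seq_cst);
        }
    }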
diff --git a/src/vm/gchandleutilities.h b/src/vm/gchandleutilities.h
index 762a37b524..08d27141e5 100644
--- a/src/vm/gchandleutilities.h
+++ b/src/vm/gchandleutilities.h
@@ -40,12 +40,13 @@ inline OBJECTREF ObjectFromHandle(OBJECTHANDLE handle)
{
_ASSERTE(handle);
-#ifdef _DEBUG_IMPL
+#if defined(_DEBUG_IMPL) && !defined(DACCESS_COMPILE)
+    // not allowed to dispatch virtually on an IGCHandleManager when compiling for DAC
DWORD context = (DWORD)GCHandleUtilities::GetGCHandleManager()->GetHandleContext(handle);
OBJECTREF objRef = ObjectToOBJECTREF(*(Object**)handle);
ValidateObjectAndAppDomain(objRef, ADIndex(context));
-#endif // _DEBUG_IMPL
+#endif // defined(_DEBUG_IMPL) && !defined(DACCESS_COMPILE)
// Wrap the raw OBJECTREF and return it
return UNCHECKED_OBJECTREF_TO_OBJECTREF(*PTR_UNCHECKED_OBJECTREF(handle));
diff --git a/src/vm/gdbjit.cpp b/src/vm/gdbjit.cpp
index 1857f60407..9557b0bf3e 100644
--- a/src/vm/gdbjit.cpp
+++ b/src/vm/gdbjit.cpp
@@ -15,6 +15,22 @@
#include "gdbjit.h"
#include "gdbjithelpers.h"
+__declspec(thread) bool tls_isSymReaderInProgress = false;
+
+#ifdef _DEBUG
+static void DumpElf(const char* methodName, const char *addr, size_t size)
+{
+    char dump[1024];
+
+    // snprintf avoids overflowing the fixed buffer on very long method names
+    snprintf(dump, sizeof(dump), "%s.o", methodName);
+
+    FILE *f = fopen(dump, "wb");
+    if (f != NULL)
+    {
+        fwrite(addr, sizeof(char), size, f);
+        fclose(f);
+    }
+}
+#endif
+
TypeInfoBase*
GetTypeInfoFromTypeHandle(TypeHandle typeHandle,
NotifyGdb::PTK_TypeInfoMap pTypeMap,
@@ -648,46 +664,6 @@ struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
// END of GDB JIT interface
-/* Predefined section names */
-const char* SectionNames[] = {
- "",
- ".text",
- ".shstrtab",
- ".debug_str",
- ".debug_abbrev",
- ".debug_info",
- ".debug_pubnames",
- ".debug_pubtypes",
- ".debug_line",
- ".symtab",
- ".strtab"
- /* After the last (.strtab) section zero or more .thunk_* sections are generated.
-
- Each .thunk_* section contains a single .thunk_#.
- These symbols are mapped to methods (or trampolines) called by currently compiled method. */
-};
-
-const int SectionNamesCount = sizeof(SectionNames) / sizeof(SectionNames[0]); // Does not include .thunk_* sections
-
-/* Static data for section headers */
-struct SectionHeader {
- uint32_t m_type;
- uint64_t m_flags;
-} Sections[] = {
- {SHT_NULL, 0},
- {SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR},
- {SHT_STRTAB, 0},
- {SHT_PROGBITS, SHF_MERGE | SHF_STRINGS },
- {SHT_PROGBITS, 0},
- {SHT_PROGBITS, 0},
- {SHT_PROGBITS, 0},
- {SHT_PROGBITS, 0},
- {SHT_PROGBITS, 0},
- {SHT_SYMTAB, 0},
- {SHT_STRTAB, 0},
- {SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR}
-};
-
/* Static data for .debug_str section */
const char* DebugStrings[] = {
"CoreCLR", "" /* module name */, "" /* module path */
@@ -780,6 +756,11 @@ const int AbbrevTableSize = sizeof(AbbrevTable);
#define DWARF_LINE_RANGE 14
#define DWARF_OPCODE_BASE 13
+#ifdef FEATURE_GDBJIT_LANGID_CS
+/* TODO: use corresponding constant when it will be added to llvm */
+#define DW_LANG_MICROSOFT_CSHARP 0x9e57
+#endif
+
DwarfLineNumHeader LineNumHeader = {
0, 2, 0, 1, 1, DWARF_LINE_BASE, DWARF_LINE_RANGE, DWARF_OPCODE_BASE, {0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1}
};
@@ -793,7 +774,11 @@ struct __attribute__((packed)) DebugInfoCU
uint32_t m_cu_name;
uint32_t m_line_num;
} debugInfoCU = {
+#ifdef FEATURE_GDBJIT_LANGID_CS
+ 1, 0, DW_LANG_MICROSOFT_CSHARP, 0, 0
+#else
1, 0, DW_LANG_C89, 0, 0
+#endif
};
struct __attribute__((packed)) DebugInfoTryCatchSub
@@ -979,11 +964,14 @@ void TypeDefInfo::DumpStrings(char *ptr, int &offset)
void TypeDefInfo::DumpDebugInfo(char *ptr, int &offset)
{
- if (m_typedef_type_offset != 0)
+ if (m_is_visited && m_base_ptr == ptr)
{
return;
}
+ m_base_ptr = ptr;
+ m_is_visited = true;
+
if (ptr != nullptr)
{
DebugInfoTypeDef buf;
@@ -1054,10 +1042,14 @@ void PrimitiveTypeInfo::DumpStrings(char* ptr, int& offset)
void PrimitiveTypeInfo::DumpDebugInfo(char *ptr, int &offset)
{
- if (m_type_offset != 0)
+ if (m_is_visited && m_base_ptr == ptr)
{
return;
}
+
+ m_base_ptr = ptr;
+ m_is_visited = true;
+
m_typedef_info->DumpDebugInfo(ptr, offset);
if (ptr != nullptr)
@@ -1071,13 +1063,12 @@ void PrimitiveTypeInfo::DumpDebugInfo(char *ptr, int &offset)
memcpy(ptr + offset,
&bufType,
sizeof(DebugInfoType));
- m_type_offset = offset;
+
+ // Replace offset from real type to typedef
+ m_type_offset = m_typedef_info->m_typedef_type_offset;
}
offset += sizeof(DebugInfoType);
- // Replace offset from real type to typedef
- if (ptr != nullptr)
- m_type_offset = m_typedef_info->m_typedef_type_offset;
}
ClassTypeInfo::ClassTypeInfo(TypeHandle typeHandle, int num_members, FunctionMemberPtrArrayHolder &method)
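DumpDebugInfo runs twice over the type graph: a sizing pass with ptr == nullptr, then a writing pass with the real buffer. The old m_type_offset != 0 guard could wrongly skip nodes on the second pass (and treated a legitimate offset of 0 as "not yet dumped"); the (m_is_visited && m_base_ptr == ptr) pair makes "already done" specific to the current pass. The guard in isolation:

    struct TypeNode {
        bool  m_is_visited = false;
        char* m_base_ptr   = nullptr;

        void DumpDebugInfo(char* ptr, int& offset) {
            if (m_is_visited && m_base_ptr == ptr)
                return;            // already handled in *this* pass
            m_base_ptr = ptr;      // remember which pass marked us visited
            m_is_visited = true;
            offset += 4;           // advance the size estimate or the write cursor
        }
    };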
@@ -1146,7 +1137,21 @@ void TypeMember::DumpDebugInfo(char* ptr, int& offset)
void TypeMember::DumpStaticDebugInfo(char* ptr, int& offset)
{
const int ptrSize = sizeof(TADDR);
- int bufSize = 0;
+ const int valueTypeBufSize = ptrSize + 6;
+ const int refTypeBufSize = ptrSize + 2;
+
+ bool isValueType = m_member_type->GetTypeHandle().GetSignatureCorElementType() ==
+ ELEMENT_TYPE_VALUETYPE;
+ int bufSize;
+ if (isValueType)
+ {
+ bufSize = valueTypeBufSize;
+ }
+ else
+ {
+ bufSize = refTypeBufSize;
+ }
+
if (ptr != nullptr)
{
DebugInfoStaticMember memberEntry;
@@ -1157,12 +1162,9 @@ void TypeMember::DumpStaticDebugInfo(char* ptr, int& offset)
// for value type static fields compute address as:
// addr = (*addr+sizeof(OBJECTREF))
- if (m_member_type->GetTypeHandle().GetSignatureCorElementType() ==
- ELEMENT_TYPE_VALUETYPE)
+ if (isValueType)
{
- bufSize = ptrSize + 6;
-
- char buf[ptrSize + 6] = {0};
+ char buf[valueTypeBufSize] = {0};
buf[0] = ptrSize + 5;
buf[1] = DW_OP_addr;
@@ -1180,9 +1182,7 @@ void TypeMember::DumpStaticDebugInfo(char* ptr, int& offset)
}
else
{
- bufSize = ptrSize + 2;
-
- char buf[ptrSize + 2] = {0};
+ char buf[refTypeBufSize] = {0};
buf[0] = ptrSize + 1;
buf[1] = DW_OP_addr;
@@ -1540,10 +1540,14 @@ void RefTypeInfo::DumpStrings(char* ptr, int& offset)
void RefTypeInfo::DumpDebugInfo(char* ptr, int& offset)
{
- if (m_type_offset != 0)
+ if (m_is_visited && m_base_ptr == ptr)
{
return;
}
+
+ m_base_ptr = ptr;
+ m_is_visited = true;
+
m_type_offset = offset;
offset += sizeof(DebugInfoRefType);
m_value_type->DumpDebugInfo(ptr, offset);
@@ -1563,10 +1567,14 @@ void RefTypeInfo::DumpDebugInfo(char* ptr, int& offset)
void NamedRefTypeInfo::DumpDebugInfo(char* ptr, int& offset)
{
- if (m_type_offset != 0)
+ if (m_is_visited && m_base_ptr == ptr)
{
return;
}
+
+ m_base_ptr = ptr;
+ m_is_visited = true;
+
m_type_offset = offset;
offset += sizeof(DebugInfoRefType) + sizeof(DebugInfoTypeDef);
m_value_type->DumpDebugInfo(ptr, offset);
@@ -1593,28 +1601,23 @@ void NamedRefTypeInfo::DumpDebugInfo(char* ptr, int& offset)
void ClassTypeInfo::DumpDebugInfo(char* ptr, int& offset)
{
- if (m_type_offset != 0)
+ if (m_is_visited && m_base_ptr == ptr)
{
return;
}
+ m_base_ptr = ptr;
+ m_is_visited = true;
+
if (m_parent != nullptr)
{
- if (m_parent->m_type_offset == 0)
- {
- m_parent->DumpDebugInfo(ptr, offset);
- }
- else if (RefTypeInfo* m_p = dynamic_cast<RefTypeInfo*>(m_parent))
- {
- if (m_p->m_value_type->m_type_offset == 0)
- m_p->m_value_type->DumpDebugInfo(ptr, offset);
- }
+ m_parent->DumpDebugInfo(ptr, offset);
}
// make sure that types of all members are dumped
for (int i = 0; i < m_num_members; ++i)
{
- if (members[i].m_member_type->m_type_offset == 0 && members[i].m_member_type != this)
+ if (members[i].m_member_type != this)
{
members[i].m_member_type->DumpDebugInfo(ptr, offset);
}
@@ -1678,14 +1681,16 @@ void ClassTypeInfo::DumpDebugInfo(char* ptr, int& offset)
void ArrayTypeInfo::DumpDebugInfo(char* ptr, int& offset)
{
- if (m_type_offset != 0)
+ if (m_is_visited && m_base_ptr == ptr)
{
return;
}
- if (m_elem_type->m_type_offset == 0)
- {
- m_elem_type->DumpDebugInfo(ptr, offset);
- }
+
+ m_base_ptr = ptr;
+ m_is_visited = true;
+
+ m_elem_type->DumpDebugInfo(ptr, offset);
+
if (ptr != nullptr)
{
DebugInfoArrayType arrType;
@@ -1754,11 +1759,12 @@ struct Elf_Symbol {
Elf_Symbol() : m_name(nullptr), m_off(0), m_value(0), m_section(0), m_size(0) {}
};
-static int countFuncs(const SymbolsInfo *lines, int nlines)
+template <class T>
+static int countFuncs(T &arr, int n)
{
int count = 0;
- for (int i = 0; i < nlines; i++) {
- if (lines[i].ilOffset == ICorDebugInfo::PROLOG)
+ for (int i = 0; i < n; i++) {
+ if (arr[i].ilOffset == ICorDebugInfo::PROLOG)
{
count++;
}
@@ -1766,10 +1772,11 @@ static int countFuncs(const SymbolsInfo *lines, int nlines)
return count;
}
-static int getNextPrologueIndex(int from, const SymbolsInfo *lines, int nlines)
+template <class T>
+static int getNextPrologueIndex(int from, T &arr, int n)
{
- for (int i = from; i < nlines; ++i) {
- if (lines[i].ilOffset == ICorDebugInfo::PROLOG)
+ for (int i = from; i < n; ++i) {
+ if (arr[i].ilOffset == ICorDebugInfo::PROLOG)
{
return i;
}
@@ -1777,14 +1784,672 @@ static int getNextPrologueIndex(int from, const SymbolsInfo *lines, int nlines)
return -1;
}
+static inline bool isListedModule(const WCHAR *wszModuleFile)
+{
+ static NewArrayHolder<WCHAR> wszModuleNames = nullptr;
+ static DWORD cBytesNeeded = 0;
+
+ // Get names of interesting modules from environment
+ if (wszModuleNames == nullptr && cBytesNeeded == 0)
+ {
+ DWORD cCharsNeeded = GetEnvironmentVariableW(W("CORECLR_GDBJIT"), NULL, 0);
+
+ if (cCharsNeeded == 0)
+ {
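+ // Cache the failed lookup so later calls return quickly without re-reading the environment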
+ cBytesNeeded = 0xffffffff;
+ return false;
+ }
+
+ WCHAR *wszModuleNamesBuf = new WCHAR[cCharsNeeded+1];
+
+ cCharsNeeded = GetEnvironmentVariableW(W("CORECLR_GDBJIT"), wszModuleNamesBuf, cCharsNeeded);
+
+ if (cCharsNeeded == 0)
+ {
+ delete[] wszModuleNamesBuf;
+ cBytesNeeded = 0xffffffff;
+ return false;
+ }
+
+ wszModuleNames = wszModuleNamesBuf;
+ cBytesNeeded = cCharsNeeded + 1;
+ }
+ else if (wszModuleNames == nullptr)
+ {
+ return false;
+ }
+
+ _ASSERTE(wszModuleNames != nullptr && cBytesNeeded > 0);
+
+ BOOL isUserDebug = FALSE;
+
+ NewArrayHolder<WCHAR> wszModuleName = new WCHAR[cBytesNeeded];
+ LPWSTR pComma = wcsstr(wszModuleNames, W(","));
+ LPWSTR tmp = wszModuleNames;
+
+ while (pComma != NULL)
+ {
+ wcsncpy(wszModuleName, tmp, pComma - tmp);
+ wszModuleName[pComma - tmp] = W('\0');
+
+ if (wcscmp(wszModuleName, wszModuleFile) == 0)
+ {
+ isUserDebug = TRUE;
+ break;
+ }
+ tmp = pComma + 1;
+ pComma = wcsstr(tmp, W(","));
+ }
+ if (isUserDebug == FALSE)
+ {
+ wcsncpy(wszModuleName, tmp, wcslen(tmp));
+ wszModuleName[wcslen(tmp)] = W('\0');
+ if (wcscmp(wszModuleName, wszModuleFile) == 0)
+ {
+ isUserDebug = TRUE;
+ }
+ }
+
+ return isUserDebug;
+}
+
static NotifyGdb::AddrSet codeAddrs;
+class Elf_SectionTracker
+{
+ private:
+ unsigned int m_Flag;
+
+ private:
+ NewArrayHolder<char> m_NamePtr;
+ unsigned int m_NameLen;
+
+ private:
+ unsigned int m_Ind;
+ unsigned int m_Off;
+ unsigned int m_Len;
+
+ private:
+ Elf_Shdr m_Hdr;
+
+ private:
+ Elf_SectionTracker *m_Next;
+
+ public:
+ Elf_SectionTracker(const char *name, unsigned ind, unsigned off, uint32_t type, uint64_t flags);
+ ~Elf_SectionTracker();
+
+ public:
+ bool NeedHeaderUpdate() const;
+ void DisableHeaderUpdate();
+
+ public:
+ unsigned int GetIndex() const { return m_Ind; }
+ unsigned int GetOffset() const { return m_Off; }
+ unsigned int GetSize() const { return m_Len; }
+
+ public:
+ const char *GetName() const { return m_NamePtr; }
+ unsigned int GetNameLen() const { return m_NameLen; }
+
+ public:
+ Elf_SectionTracker *GetNext(void);
+ void SetNext(Elf_SectionTracker *next);
+
+ public:
+ void Forward(unsigned int len);
+
+ public:
+ Elf_Shdr *Header(void);
+ const Elf_Shdr *Header(void) const;
+
+};
+
+Elf_SectionTracker::Elf_SectionTracker(const char *name,
+ unsigned ind, unsigned off,
+ uint32_t type, uint64_t flags)
+ : m_Flag(0),
+ m_NamePtr(nullptr),
+ m_NameLen(0),
+ m_Ind(ind),
+ m_Off(off),
+ m_Len(0),
+ m_Next(nullptr)
+{
+ if (name)
+ {
+ unsigned int len = strlen(name);
+ char *ptr = new char[len + 1];
+
+ strncpy(ptr, name, len + 1);
+
+ m_NamePtr = ptr;
+ m_NameLen = len;
+ }
+
+ m_Hdr.sh_type = type;
+ m_Hdr.sh_flags = flags;
+ m_Hdr.sh_name = 0;
+ m_Hdr.sh_addr = 0;
+ m_Hdr.sh_offset = 0;
+ m_Hdr.sh_size = 0;
+ m_Hdr.sh_link = SHN_UNDEF;
+ m_Hdr.sh_info = 0;
+ m_Hdr.sh_addralign = 1;
+ m_Hdr.sh_entsize = 0;
+}
+
+Elf_SectionTracker::~Elf_SectionTracker()
+{
+}
+
+#define ESTF_NO_HEADER_UPDATE 0x00000001
+
+bool Elf_SectionTracker::NeedHeaderUpdate() const
+{
+ return !(m_Flag & ESTF_NO_HEADER_UPDATE);
+}
+
+void Elf_SectionTracker::DisableHeaderUpdate()
+{
+ m_Flag |= ESTF_NO_HEADER_UPDATE;
+}
+
+void Elf_SectionTracker::Forward(unsigned int len)
+{
+ m_Len += len;
+}
+
+void Elf_SectionTracker::SetNext(Elf_SectionTracker *next)
+{
+ m_Next = next;
+}
+
+Elf_SectionTracker *Elf_SectionTracker::GetNext(void)
+{
+ return m_Next;
+}
+
+Elf_Shdr *Elf_SectionTracker::Header(void)
+{
+ return &m_Hdr;
+}
+
+const Elf_Shdr *Elf_SectionTracker::Header(void) const
+{
+ return &m_Hdr;
+}
+
+class Elf_Buffer
+{
+ private:
+ NewArrayHolder<char> m_Ptr;
+ unsigned int m_Len;
+ unsigned int m_Pos;
+
+ public:
+ Elf_Buffer(unsigned int len);
+
+ private:
+ char *Ensure(unsigned int len);
+ void Forward(unsigned int len);
+
+ public:
+ unsigned int GetPos() const
+ {
+ return m_Pos;
+ }
+
+ char *GetPtr(unsigned int off = 0)
+ {
+ return m_Ptr.GetValue() + off;
+ }
+
+ public:
+ char *Reserve(unsigned int len);
+ template <typename T> T *ReserveT(unsigned int len = sizeof(T))
+ {
+ _ASSERTE(len >= sizeof(T));
+ return reinterpret_cast<T *>(Reserve(len));
+ }
+
+ public:
+ void Append(const char *src, unsigned int len);
+ template <typename T> void AppendT(T *src)
+ {
+ Append(reinterpret_cast<const char *>(src), sizeof(T));
+ }
+};
+
+Elf_Buffer::Elf_Buffer(unsigned int len)
+ : m_Ptr(new char[len])
+ , m_Len(len)
+ , m_Pos(0)
+{
+}
+
+char *Elf_Buffer::Ensure(unsigned int len)
+{
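+ // Grow the capacity geometrically until the request fits, then reallocate and copy the used prefix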
+ bool bAdjusted = false;
+
+ while (m_Pos + len > m_Len)
+ {
+ m_Len *= 2;
+ bAdjusted = true;
+ }
+
+ if (bAdjusted)
+ {
+ char *ptr = new char [m_Len]; // m_Len has already been doubled above
+ memcpy(ptr, m_Ptr.GetValue(), m_Pos);
+ m_Ptr = ptr;
+ }
+
+ return GetPtr(m_Pos);
+}
+
+void Elf_Buffer::Forward(unsigned int len)
+{
+ m_Pos += len;
+}
+
+char *Elf_Buffer::Reserve(unsigned int len)
+{
+ char *ptr = Ensure(len);
+ Forward(len);
+ return ptr;
+}
+
+void Elf_Buffer::Append(const char *src, unsigned int len)
+{
+ char *dst = Reserve(len);
+ memcpy(dst, src, len);
+}
+
+#define ELF_BUILDER_TEXT_SECTION_INDEX 1
+
+class Elf_Builder
+{
+ private:
+ Elf_Buffer m_Buffer;
+
+ private:
+ unsigned int m_SectionCount;
+ Elf_SectionTracker *m_First;
+ Elf_SectionTracker *m_Last;
+ Elf_SectionTracker *m_Curr;
+
+ public:
+ Elf_Builder();
+ ~Elf_Builder();
+
+ public:
+ unsigned int GetSectionCount(void) { return m_SectionCount; }
+
+ public:
+ void Initialize(PCODE codePtr, TADDR codeLen);
+
+ public:
+ Elf_SectionTracker *OpenSection(const char *name, uint32_t type, uint64_t flags);
+ void CloseSection();
+
+ public:
+ char *Reserve(unsigned int len);
+ template <typename T> T *ReserveT(unsigned int len = sizeof(T))
+ {
+ _ASSERTE(len >= sizeof(T));
+ return reinterpret_cast<T *>(Reserve(len));
+ }
+
+ public:
+ void Append(const char *src, unsigned int len);
+ template <typename T> void AppendT(T *src)
+ {
+ Append(reinterpret_cast<const char *>(src), sizeof(T));
+ }
+
+ public:
+ void Finalize(void);
+
+ public:
+ char *Export(size_t *len);
+};
+
+Elf_Builder::Elf_Builder()
+ : m_Buffer(128),
+ m_SectionCount(0),
+ m_First(nullptr),
+ m_Last(nullptr),
+ m_Curr(nullptr)
+{
+}
+
+Elf_Builder::~Elf_Builder()
+{
+ Elf_SectionTracker *curr = m_First;
+
+ while (curr)
+ {
+ Elf_SectionTracker *next = curr->GetNext();
+ delete curr;
+ curr = next;
+ }
+}
+
+void Elf_Builder::Initialize(PCODE codePtr, TADDR codeLen)
+{
+ //
+ // Reserve ELF Header
+ //
+ m_Buffer.Reserve(sizeof(Elf_Ehdr));
+
+ //
+ // Create NULL section
+ //
+ Elf_SectionTracker *null = OpenSection("", SHT_NULL, 0);
+ {
+ null->DisableHeaderUpdate();
+ null->Header()->sh_addralign = 0;
+ }
+ CloseSection();
+
+ //
+ // Create '.text' section
+ //
+ Elf_SectionTracker *text = OpenSection(".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR);
+ {
+ text->DisableHeaderUpdate();
+ text->Header()->sh_addr = codePtr;
+ text->Header()->sh_size = codeLen;
+
+ _ASSERTE(text->GetIndex() == ELF_BUILDER_TEXT_SECTION_INDEX);
+ }
+ CloseSection();
+}
+
+char *Elf_Builder::Reserve(unsigned int len)
+{
+ _ASSERTE(m_Curr != nullptr && "Section must be opened first");
+ char *ptr = m_Buffer.Reserve(len);
+ m_Curr->Forward(len);
+ return ptr;
+}
+
+void Elf_Builder::Append(const char *src, unsigned int len)
+{
+ _ASSERTE(m_Curr != nullptr && "Section must be opened first");
+ char *dst = Reserve(len);
+ memcpy(dst, src, len);
+}
+
+Elf_SectionTracker *Elf_Builder::OpenSection(const char *name, uint32_t type, uint64_t flags)
+{
+ _ASSERTE(m_Curr == nullptr && "Previous section must be closed first");
+
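+ // The tracker records the current buffer position as the section's file offset
+ // and is appended to a singly linked list kept in section-index order.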
+ Elf_SectionTracker *next = new Elf_SectionTracker(name, m_SectionCount, m_Buffer.GetPos(), type, flags);
+
+ if (m_First == NULL)
+ {
+ m_First = next;
+ }
+
+ if (m_Last != NULL)
+ {
+ m_Last->SetNext(next);
+ }
+
+ m_SectionCount++;
+
+ m_Last = next;
+ m_Curr = next;
+
+ return next;
+}
+
+void Elf_Builder::CloseSection()
+{
+ _ASSERTE(m_Curr != nullptr && "Section must be opened first");
+ m_Curr = nullptr;
+}
+
+char *Elf_Builder::Export(size_t *pLen)
+{
+ unsigned int len = m_Buffer.GetPos();
+ const char *src = m_Buffer.GetPtr();
+ char *dst = new char[len];
+
+ memcpy(dst, src, len);
+
+ if (pLen)
+ {
+ *pLen = len;
+ }
+
+ return dst;
+}
+
+void Elf_Builder::Finalize()
+{
+ //
+ // Create '.shstrtab'
+ //
+ Elf_SectionTracker *shstrtab = OpenSection(".shstrtab", SHT_STRTAB, 0);
+ {
+ Elf_SectionTracker *curr = m_First;
+
+ while (curr)
+ {
+ unsigned int off = shstrtab->GetSize();
+ unsigned int len = curr->GetNameLen();
+
+ char *dst = Reserve(len + 1);
+ memcpy(dst, curr->GetName(), len);
+ dst[len] = '\0';
+
+ curr->Header()->sh_name = off;
+
+ curr = curr->GetNext();
+ }
+ }
+ CloseSection();
+
+ //
+ // Create Section Header(s) Table
+ //
+ unsigned int shtOffset = m_Buffer.GetPos();
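+ // The section header table is written at the end of the image; each tracker's
+ // accumulated offset/size is patched in unless the section opted out (e.g. .text).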
+ {
+ Elf_SectionTracker *curr = m_First;
+
+ while (curr)
+ {
+ if (curr->NeedHeaderUpdate())
+ {
+ curr->Header()->sh_offset = curr->GetOffset();
+ curr->Header()->sh_size = curr->GetSize();
+ }
+ m_Buffer.AppendT(curr->Header());
+ curr = curr->GetNext();
+ }
+ }
+
+ //
+ // Update ELF Header
+ //
+ Elf_Ehdr *elfHeader = new (m_Buffer.GetPtr()) Elf_Ehdr;
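+ // Placement-new re-initializes the header space reserved in Initialize();
+ // the Elf_Ehdr constructor fills e_ident and the other fixed fields.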
+
+#ifdef _TARGET_ARM_
+ elfHeader->e_flags = EF_ARM_EABI_VER5;
+#ifdef ARM_SOFTFP
+ elfHeader->e_flags |= EF_ARM_SOFT_FLOAT;
+#else
+ elfHeader->e_flags |= EF_ARM_VFP_FLOAT;
+#endif
+#endif
+ elfHeader->e_shoff = shtOffset;
+ elfHeader->e_shentsize = sizeof(Elf_Shdr);
+ elfHeader->e_shnum = m_SectionCount;
+ elfHeader->e_shstrndx = shstrtab->GetIndex();
+}
+
+#ifdef FEATURE_GDBJIT_FRAME
+struct __attribute__((packed)) Length
+{
+ UINT32 value;
+
+ Length &operator=(UINT32 n)
+ {
+ value = n;
+ return *this;
+ }
+
+ Length()
+ {
+ value = 0;
+ }
+};
+
+struct __attribute__((packed)) CIE
+{
+ Length length;
+ UINT32 id;
+ UINT8 version;
+ UINT8 augmentation;
+ UINT8 code_alignment_factor;
+ INT8 data_alignment_factor;
+ UINT8 return_address_register;
+ UINT8 instructions[0];
+};
+
+struct __attribute__((packed)) FDE
+{
+ Length length;
+ UINT32 cie;
+ PCODE initial_location;
+ TADDR address_range;
+ UINT8 instructions[0];
+};
+
+static void BuildDebugFrame(Elf_Builder &elfBuilder, PCODE pCode, TADDR codeSize)
+{
+#if defined(_TARGET_ARM_)
+ const unsigned int code_alignment_factor = 2;
+ const int data_alignment_factor = -4;
+
+ UINT8 cieCode[] = {
+ // DW_CFA_def_cfa 13[sp], 0
+ 0x0c, 0x0d, 0x00,
+ };
+
+ UINT8 fdeCode[] = {
+ // DW_CFA_advance_loc 1
+ 0x02, 0x01,
+ // DW_CFA_def_cfa_offset 8
+ 0x0e, 0x08,
+ // DW_CFA_offset 11(r11), -8(= -4 * 2)
+ (0x02 << 6) | 0x0b, 0x02,
+ // DW_CFA_offset 14(lr), -4(= -4 * 1)
+ (0x02 << 6) | 0x0e, 0x01,
+ // DW_CFA_def_cfa_register 11(r11)
+ 0x0d, 0x0b,
+ };
+#elif defined(_TARGET_X86_)
+ const unsigned int code_alignment_factor = 1;
+ const int data_alignment_factor = -4;
+
+ UINT8 cieCode[] = {
+ // DW_CFA_def_cfa 4(esp), 4
+ 0x0c, 0x04, 0x04,
+ // DW_CFA_offset 8(eip), -4(= -4 * 1)
+ (0x02 << 6) | 0x08, 0x01,
+ };
+
+ UINT8 fdeCode[] = {
+ // DW_CFA_advance_loc 1
+ 0x02, 0x01,
+ // DW_CFA_def_cfa_offset 8
+ 0x0e, 0x08,
+ // DW_CFA_offset 5(ebp), -8(= -4 * 2)
+ (0x02 << 6) | 0x05, 0x02,
+ // DW_CFA_def_cfa_register 5(ebp)
+ 0x0d, 0x05,
+ };
+#elif defined(_TARGET_AMD64_)
+ const unsigned int code_alignment_factor = 1;
+ const int data_alignment_factor = -8;
+
+ UINT8 cieCode[] = {
+ // DW_CFA_def_cfa 7(rsp), 8
+ 0x0c, 0x07, 0x08,
+ // DW_CFA_offset 16, -16 (= -8 * 2)
+ (0x02 << 6) | 0x10, 0x01,
+ };
+
+ UINT8 fdeCode[] = {
+ // DW_CFA_advance_loc(1)
+ 0x02, 0x01,
+ // DW_CFA_def_cfa_offset(16)
+ 0x0e, 0x10,
+ // DW_CFA_offset 6, -16 (= -8 * 2)
+ (0x02 << 6) | 0x06, 0x02,
+ // DW_CFA_def_cfa_register(6)
+ 0x0d, 0x06,
+ };
+#else
+#error "Unsupported architecture"
+#endif
+
+ elfBuilder.OpenSection(".debug_frame", SHT_PROGBITS, 0);
+
+ //
+ // Common Information Entry
+ //
+ int cieLen = ALIGN_UP(sizeof(CIE) + sizeof(cieCode), ADDRESS_SIZE) + sizeof(Length);
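+ // Per DWARF CFI, 'length' excludes the length field itself, and entries are padded to the address size.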
+
+ CIE *pCIE = elfBuilder.ReserveT<CIE>(cieLen);
+
+ memset(pCIE, 0, cieLen);
+
+ pCIE->length = cieLen - sizeof(Length);
+ pCIE->id = 0xffffffff;
+ pCIE->version = 3;
+ pCIE->augmentation = 0;
+ Leb128Encode(code_alignment_factor, reinterpret_cast<char *>(&pCIE->code_alignment_factor), 1);
+ Leb128Encode(data_alignment_factor, reinterpret_cast<char *>(&pCIE->data_alignment_factor), 1);
+
+ pCIE->return_address_register = 0;
+
+ memcpy(&pCIE->instructions, cieCode, sizeof(cieCode));
+
+ //
+ // Frame Description Entry
+ //
+ int fdeLen = ALIGN_UP((sizeof(FDE) + sizeof(fdeCode)), ADDRESS_SIZE) + sizeof(Length);
+
+ FDE *pFDE = elfBuilder.ReserveT<FDE>(fdeLen);
+
+ memset(pFDE, 0, fdeLen);
+
+ pFDE->length = fdeLen - sizeof(Length);
+ pFDE->cie = 0;
+ pFDE->initial_location = pCode;
+ pFDE->address_range = codeSize;
+ memcpy(&pFDE->instructions, fdeCode, sizeof(fdeCode));
+
+ elfBuilder.CloseSection();
+}
+#endif // FEATURE_GDBJIT_FRAME
+
/* Create ELF/DWARF debug info for jitted method */
-void NotifyGdb::MethodCompiled(MethodDesc* methodDescPtr)
+void NotifyGdb::MethodPrepared(MethodDesc* methodDescPtr)
{
EX_TRY
{
- NotifyGdb::OnMethodCompiled(methodDescPtr);
+ if (!tls_isSymReaderInProgress)
+ {
+ tls_isSymReaderInProgress = true;
+ NotifyGdb::OnMethodPrepared(methodDescPtr);
+ tls_isSymReaderInProgress = false;
+ }
}
EX_CATCH
{
@@ -1792,21 +2457,19 @@ void NotifyGdb::MethodCompiled(MethodDesc* methodDescPtr)
EX_END_CATCH(SwallowAllExceptions);
}
-void NotifyGdb::OnMethodCompiled(MethodDesc* methodDescPtr)
+void NotifyGdb::OnMethodPrepared(MethodDesc* methodDescPtr)
{
- int symbolCount = 0;
- NewArrayHolder<Elf_Symbol> symbolNames;
-
PCODE pCode = methodDescPtr->GetNativeCode();
if (pCode == NULL)
return;
- unsigned int symInfoLen = 0;
- NewArrayHolder<SymbolsInfo> symInfo = nullptr;
- LocalsInfo locals;
/* Get method name & size of jitted code */
- LPCUTF8 methodName = methodDescPtr->GetName();
EECodeInfo codeInfo(pCode);
+ if (!codeInfo.IsValid())
+ {
+ return;
+ }
+
TADDR codeSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken());
pCode = PCODEToPINSTR(pCode);
@@ -1829,67 +2492,195 @@ void NotifyGdb::OnMethodCompiled(MethodDesc* methodDescPtr)
if (length == 0)
return;
- static NewArrayHolder<WCHAR> wszModuleNames = nullptr;
- DWORD cCharsNeeded = 0;
+ bool bNotify = false;
- // Get names of interesting modules from environment
- if (wszModuleNames == nullptr)
+ Elf_Builder elfBuilder;
+
+ elfBuilder.Initialize(pCode, codeSize);
+
+#ifdef FEATURE_GDBJIT_FRAME
+ if (g_pConfig->ShouldEmitDebugFrame())
{
- cCharsNeeded = GetEnvironmentVariableW(W("CORECLR_GDBJIT"), NULL, 0);
+ bool bEmitted = EmitFrameInfo(elfBuilder, pCode, codeSize);
+ bNotify = bNotify || bEmitted;
+ }
+#endif
- if(cCharsNeeded == 0)
- return;
- wszModuleNames = new WCHAR[cCharsNeeded+1];
- cCharsNeeded = GetEnvironmentVariableW(W("CORECLR_GDBJIT"), wszModuleNames, cCharsNeeded);
- if(cCharsNeeded == 0)
- return;
+ if (isListedModule(wszModuleFile))
+ {
+ bool bEmitted = EmitDebugInfo(elfBuilder, methodDescPtr, pCode, codeSize, szModuleFile);
+ bNotify = bNotify || bEmitted;
}
+#ifdef FEATURE_GDBJIT_SYMTAB
else
{
- cCharsNeeded = wcslen(wszModuleNames);
+ bool bEmitted = EmitSymtab(elfBuilder, methodDescPtr, pCode, codeSize);
+ bNotify = bNotify || bEmitted;
}
+#endif
- BOOL isUserDebug = FALSE;
+ if (!bNotify)
+ {
+ return;
+ }
- NewArrayHolder<WCHAR> wszModuleName = new WCHAR[cCharsNeeded+1];
- LPWSTR pComma = wcsstr(wszModuleNames, W(","));
- LPWSTR tmp = wszModuleNames;
+ elfBuilder.Finalize();
- while (pComma != NULL)
+ char *symfile_addr = NULL;
+ size_t symfile_size = 0;
+
+ symfile_addr = elfBuilder.Export(&symfile_size);
+
+#ifdef _DEBUG
+ LPCUTF8 methodName = methodDescPtr->GetName();
+
+ if (g_pConfig->ShouldDumpElfOnMethod(methodName))
{
- wcsncpy(wszModuleName, tmp, pComma - tmp);
- wszModuleName[pComma - tmp] = W('\0');
+ DumpElf(methodName, symfile_addr, symfile_size);
+ }
+#endif
- if (wcscmp(wszModuleName, wszModuleFile) == 0)
- {
- isUserDebug = TRUE;
- break;
- }
- tmp = pComma + 1;
- pComma = wcsstr(tmp, W(","));
+ /* Create GDB JIT structures */
+ NewHolder<jit_code_entry> jit_symbols = new jit_code_entry;
+
+ /* Fill the new entry */
+ jit_symbols->next_entry = jit_symbols->prev_entry = 0;
+ jit_symbols->symfile_addr = symfile_addr;
+ jit_symbols->symfile_size = symfile_size;
+
+ /* Link into list */
+ jit_code_entry *head = __jit_debug_descriptor.first_entry;
+ __jit_debug_descriptor.first_entry = jit_symbols;
+ if (head != 0)
+ {
+ jit_symbols->next_entry = head;
+ head->prev_entry = jit_symbols;
}
- if (isUserDebug == FALSE)
+
+ jit_symbols.SuppressRelease();
+
+ /* Notify the debugger */
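+ /* GDB sets a breakpoint in the no-op __jit_debug_register_code(), so this call lets it read the new entry */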
+ __jit_debug_descriptor.relevant_entry = jit_symbols;
+ __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
+ __jit_debug_register_code();
+}
+
+#ifdef FEATURE_GDBJIT_FRAME
+bool NotifyGdb::EmitFrameInfo(Elf_Builder &elfBuilder, PCODE pCode, TADDR codeSize)
+{
+ BuildDebugFrame(elfBuilder, pCode, codeSize);
+ return true;
+}
+#endif // FEATURE_GDBJIT_FRAME
+
+#ifdef FEATURE_GDBJIT_SYMTAB
+bool NotifyGdb::EmitSymtab(Elf_Builder &elfBuilder, MethodDesc* methodDescPtr, PCODE pCode, TADDR codeSize)
+{
+ NewArrayHolder<DebuggerILToNativeMap> map = nullptr;
+ NewArrayHolder<Elf_Symbol> symbols = nullptr;
+ NewArrayHolder<NewArrayHolder<char>> symbolNames = nullptr;
+
+ ULONG32 numMap;
+ int symbolCount;
+
+ LPCUTF8 methodName = methodDescPtr->GetName();
+
+ if (GetMethodNativeMap(methodDescPtr, &numMap, map, NULL, NULL) == S_OK)
{
- wcsncpy(wszModuleName, tmp, wcslen(tmp));
- wszModuleName[wcslen(tmp)] = W('\0');
- if (wcscmp(wszModuleName, wszModuleFile) == 0)
+ int methodCount = countFuncs(map, numMap);
+ symbolCount = methodCount + 1;
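+ // +1 for the mandatory null symbol at index 0 of the ELF symbol table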
+ symbols = new Elf_Symbol[symbolCount];
+
+ if (methodCount > 1)
+ symbolNames = new NewArrayHolder<char>[methodCount - 1];
+
+ int startIndex = getNextPrologueIndex(0, map, numMap);
+
+ int methodNameSize = strlen(methodName) + 10;
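+ // Extra room covers the "_<n>" suffix and the terminating NUL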
+
+ for (int i = 1; i < symbolCount; ++i)
{
- isUserDebug = TRUE;
+ int endIndex = getNextPrologueIndex(startIndex + 1, map, numMap);
+
+ PCODE methodStart = map[startIndex].nativeStartOffset;
+ TADDR methodSize = endIndex == -1 ? codeSize - methodStart : map[endIndex].nativeStartOffset - methodStart;
+
+ if (i == 1)
+ {
+ symbols[i].m_name = methodName;
+ }
+ else
+ {
+ int symbolNameIndex = i - 2;
+ symbolNames[symbolNameIndex] = new char[methodNameSize];
+ sprintf_s(symbolNames[symbolNameIndex], methodNameSize, "%s_%d", methodName, symbolNameIndex + 1);
+ symbols[i].m_name = symbolNames[symbolNameIndex];
+ }
+
+ symbols[i].m_value = pCode + methodStart;
+ symbols[i].m_size = methodSize;
+
+ startIndex = endIndex;
}
}
+ else
+ {
+ symbolCount = 2;
+ symbols = new Elf_Symbol[symbolCount];
- if (isUserDebug == FALSE)
+ symbols[1].m_name = methodName;
+ symbols[1].m_value = pCode;
+ symbols[1].m_size = codeSize;
+ }
+
+ symbols[0].m_name = "";
+
+ MemBuf sectSymTab, sectStrTab;
+
+ if (!BuildStringTableSection(sectStrTab, symbols, symbolCount))
{
- return;
+ return false;
+ }
+
+ if (!BuildSymbolTableSection(sectSymTab, pCode, codeSize, symbolCount - 1, symbols, symbolCount, 0))
+ {
+ return false;
}
+ Elf_SectionTracker *strtab = elfBuilder.OpenSection(".strtab", SHT_STRTAB, 0);
+ elfBuilder.Append(sectStrTab.MemPtr, sectStrTab.MemSize);
+ elfBuilder.CloseSection();
+
+ Elf_SectionTracker *symtab = elfBuilder.OpenSection(".symtab", SHT_SYMTAB, 0);
+ elfBuilder.Append(sectSymTab.MemPtr, sectSymTab.MemSize);
+ symtab->Header()->sh_link = strtab->GetIndex();
+ symtab->Header()->sh_entsize = sizeof(Elf_Sym);
+ elfBuilder.CloseSection();
+
+ return true;
+}
+#endif // FEATURE_GDBJIT_SYMTAB
+
+bool NotifyGdb::EmitDebugInfo(Elf_Builder &elfBuilder, MethodDesc* methodDescPtr, PCODE pCode, TADDR codeSize, const char *szModuleFile)
+{
+ unsigned int thunkIndexBase = elfBuilder.GetSectionCount();
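+ // The .thunk_* sections emitted below are the next sections added, so their indices start here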
+
+ LPCUTF8 methodName = methodDescPtr->GetName();
+
+ int symbolCount = 0;
+ NewArrayHolder<Elf_Symbol> symbolNames;
+
+ unsigned int symInfoLen = 0;
+ NewArrayHolder<SymbolsInfo> symInfo = nullptr;
+ LocalsInfo locals;
+
NewHolder<TK_TypeInfoMap> pTypeMap = new TK_TypeInfoMap();
/* Get debug info for method from portable PDB */
HRESULT hr = GetDebugInfoFromPDB(methodDescPtr, symInfo, symInfoLen, locals);
if (FAILED(hr) || symInfoLen == 0)
{
- return;
+ return false;
}
int method_count = countFuncs(symInfo, symInfoLen);
@@ -1900,7 +2691,7 @@ void NotifyGdb::OnMethodCompiled(MethodDesc* methodDescPtr)
/* Collect addresses of thunks called by method */
if (!CollectCalledMethods(pCalledMethods, (TADDR)methodDescPtr->GetNativeCode(), method, symbolNames, symbolCount))
{
- return;
+ return false;
}
pCH->SetCalledMethods(NULL);
@@ -1916,7 +2707,7 @@ void NotifyGdb::OnMethodCompiled(MethodDesc* methodDescPtr)
if (firstLineIndex >= symInfoLen)
{
- return;
+ return false;
}
int start_index = getNextPrologueIndex(0, symInfo, symInfoLen);
@@ -1950,19 +2741,19 @@ void NotifyGdb::OnMethodCompiled(MethodDesc* methodDescPtr)
start_index = end_index;
}
- MemBuf elfHeader, sectHeaders, sectStr, sectSymTab, sectStrTab, dbgInfo, dbgAbbrev, dbgPubname, dbgPubType, dbgLine,
- dbgStr, elfFile;
+ MemBuf sectSymTab, sectStrTab, dbgInfo, dbgAbbrev, dbgPubname, dbgPubType, dbgLine,
+ dbgStr;
/* Build .debug_abbrev section */
if (!BuildDebugAbbrev(dbgAbbrev))
{
- return;
+ return false;
}
/* Build .debug_line section */
if (!BuildLineTable(dbgLine, pCode, codeSize, symInfo, symInfoLen))
{
- return;
+ return false;
}
DebugStrings[1] = szModuleFile;
@@ -1970,13 +2761,13 @@ void NotifyGdb::OnMethodCompiled(MethodDesc* methodDescPtr)
/* Build .debug_str section */
if (!BuildDebugStrings(dbgStr, pTypeMap, method))
{
- return;
+ return false;
}
/* Build .debug_info section */
if (!BuildDebugInfo(dbgInfo, pTypeMap, method))
{
- return;
+ return false;
}
for (int i = 0; i < method.GetCount(); ++i)
@@ -1988,13 +2779,13 @@ void NotifyGdb::OnMethodCompiled(MethodDesc* methodDescPtr)
/* Build .debug_pubname section */
if (!BuildDebugPub(dbgPubname, methodName, dbgInfo.MemSize, 0x28))
{
- return;
+ return false;
}
/* Build debug_pubtype section */
if (!BuildDebugPub(dbgPubType, "int", dbgInfo.MemSize, 0x1a))
{
- return;
+ return false;
}
/* Build .strtab section */
@@ -2008,158 +2799,64 @@ void NotifyGdb::OnMethodCompiled(MethodDesc* methodDescPtr)
}
if (!BuildStringTableSection(sectStrTab, symbolNames, symbolCount))
{
- return;
+ return false;
}
/* Build .symtab section */
- if (!BuildSymbolTableSection(sectSymTab, pCode, codeSize, method, symbolNames, symbolCount))
+ if (!BuildSymbolTableSection(sectSymTab, pCode, codeSize, method.GetCount(), symbolNames, symbolCount, thunkIndexBase))
{
- return;
+ return false;
}
- /* Build section headers table and section names table */
- BuildSectionTables(sectHeaders, sectStr, method, symbolCount);
-
- /* Patch section offsets & sizes */
- long offset = sizeof(Elf_Ehdr);
- Elf_Shdr* pShdr = reinterpret_cast<Elf_Shdr*>(sectHeaders.MemPtr.GetValue());
- ++pShdr; // .text
- pShdr->sh_addr = pCode;
- pShdr->sh_size = codeSize;
- ++pShdr; // .shstrtab
- pShdr->sh_offset = offset;
- pShdr->sh_size = sectStr.MemSize;
- offset += sectStr.MemSize;
- ++pShdr; // .debug_str
- pShdr->sh_offset = offset;
- pShdr->sh_size = dbgStr.MemSize;
- offset += dbgStr.MemSize;
- ++pShdr; // .debug_abbrev
- pShdr->sh_offset = offset;
- pShdr->sh_size = dbgAbbrev.MemSize;
- offset += dbgAbbrev.MemSize;
- ++pShdr; // .debug_info
- pShdr->sh_offset = offset;
- pShdr->sh_size = dbgInfo.MemSize;
- offset += dbgInfo.MemSize;
- ++pShdr; // .debug_pubnames
- pShdr->sh_offset = offset;
- pShdr->sh_size = dbgPubname.MemSize;
- offset += dbgPubname.MemSize;
- ++pShdr; // .debug_pubtypes
- pShdr->sh_offset = offset;
- pShdr->sh_size = dbgPubType.MemSize;
- offset += dbgPubType.MemSize;
- ++pShdr; // .debug_line
- pShdr->sh_offset = offset;
- pShdr->sh_size = dbgLine.MemSize;
- offset += dbgLine.MemSize;
- ++pShdr; // .symtab
- pShdr->sh_offset = offset;
- pShdr->sh_size = sectSymTab.MemSize;
- pShdr->sh_link = GetSectionIndex(".strtab");
- offset += sectSymTab.MemSize;
- ++pShdr; // .strtab
- pShdr->sh_offset = offset;
- pShdr->sh_size = sectStrTab.MemSize;
- offset += sectStrTab.MemSize;
-
- // .thunks
for (int i = 1 + method.GetCount(); i < symbolCount; i++)
{
- ++pShdr;
- pShdr->sh_addr = PCODEToPINSTR(symbolNames[i].m_value);
- pShdr->sh_size = 8;
- }
+ char name[256];
- /* Build ELF header */
- if (!BuildELFHeader(elfHeader))
- {
- return;
+ sprintf_s(name, _countof(name), ".thunk_%i", i);
+
+ Elf_SectionTracker *thunk = elfBuilder.OpenSection(name, SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR);
+ thunk->DisableHeaderUpdate();
+ elfBuilder.CloseSection();
}
- Elf_Ehdr* header = reinterpret_cast<Elf_Ehdr*>(elfHeader.MemPtr.GetValue());
-#ifdef _TARGET_ARM_
- header->e_flags = EF_ARM_EABI_VER5;
-#ifdef ARM_SOFTFP
- header->e_flags |= EF_ARM_SOFT_FLOAT;
-#else
- header->e_flags |= EF_ARM_VFP_FLOAT;
-#endif
-#endif
- header->e_shoff = offset;
- header->e_shentsize = sizeof(Elf_Shdr);
- int thunks_count = symbolCount - method.GetCount() - 1;
- header->e_shnum = SectionNamesCount + thunks_count;
- header->e_shstrndx = GetSectionIndex(".shstrtab");
-
- /* Build ELF image in memory */
- elfFile.MemSize = elfHeader.MemSize + sectStr.MemSize + dbgStr.MemSize + dbgAbbrev.MemSize + dbgInfo.MemSize +
- dbgPubname.MemSize + dbgPubType.MemSize + dbgLine.MemSize + sectSymTab.MemSize +
- sectStrTab.MemSize + sectHeaders.MemSize;
- elfFile.MemPtr = new char[elfFile.MemSize];
-
- /* Copy section data */
- offset = 0;
- memcpy(elfFile.MemPtr, elfHeader.MemPtr, elfHeader.MemSize);
- offset += elfHeader.MemSize;
- memcpy(elfFile.MemPtr + offset, sectStr.MemPtr, sectStr.MemSize);
- offset += sectStr.MemSize;
- memcpy(elfFile.MemPtr + offset, dbgStr.MemPtr, dbgStr.MemSize);
- offset += dbgStr.MemSize;
- memcpy(elfFile.MemPtr + offset, dbgAbbrev.MemPtr, dbgAbbrev.MemSize);
- offset += dbgAbbrev.MemSize;
- memcpy(elfFile.MemPtr + offset, dbgInfo.MemPtr, dbgInfo.MemSize);
- offset += dbgInfo.MemSize;
- memcpy(elfFile.MemPtr + offset, dbgPubname.MemPtr, dbgPubname.MemSize);
- offset += dbgPubname.MemSize;
- memcpy(elfFile.MemPtr + offset, dbgPubType.MemPtr, dbgPubType.MemSize);
- offset += dbgPubType.MemSize;
- memcpy(elfFile.MemPtr + offset, dbgLine.MemPtr, dbgLine.MemSize);
- offset += dbgLine.MemSize;
- memcpy(elfFile.MemPtr + offset, sectSymTab.MemPtr, sectSymTab.MemSize);
- offset += sectSymTab.MemSize;
- memcpy(elfFile.MemPtr + offset, sectStrTab.MemPtr, sectStrTab.MemSize);
- offset += sectStrTab.MemSize;
-
- memcpy(elfFile.MemPtr + offset, sectHeaders.MemPtr, sectHeaders.MemSize);
-
- elfFile.MemPtr.SuppressRelease();
-
-#ifdef GDBJIT_DUMPELF
- DumpElf(methodName, elfFile);
-#endif
- /* Create GDB JIT structures */
- NewHolder<jit_code_entry> jit_symbols = new jit_code_entry;
+ elfBuilder.OpenSection(".debug_str", SHT_PROGBITS, SHF_MERGE | SHF_STRINGS);
+ elfBuilder.Append(dbgStr.MemPtr, dbgStr.MemSize);
+ elfBuilder.CloseSection();
- /* Fill the new entry */
- jit_symbols->next_entry = jit_symbols->prev_entry = 0;
- jit_symbols->symfile_addr = elfFile.MemPtr;
- jit_symbols->symfile_size = elfFile.MemSize;
+ elfBuilder.OpenSection(".debug_abbrev", SHT_PROGBITS, 0);
+ elfBuilder.Append(dbgAbbrev.MemPtr, dbgAbbrev.MemSize);
+ elfBuilder.CloseSection();
- /* Link into list */
- jit_code_entry *head = __jit_debug_descriptor.first_entry;
- __jit_debug_descriptor.first_entry = jit_symbols;
- if (head != 0)
- {
- jit_symbols->next_entry = head;
- head->prev_entry = jit_symbols;
- }
+ elfBuilder.OpenSection(".debug_info", SHT_PROGBITS, 0);
+ elfBuilder.Append(dbgInfo.MemPtr, dbgInfo.MemSize);
+ elfBuilder.CloseSection();
- jit_symbols.SuppressRelease();
+ elfBuilder.OpenSection(".debug_pubnames", SHT_PROGBITS, 0);
+ elfBuilder.Append(dbgPubname.MemPtr, dbgPubname.MemSize);
+ elfBuilder.CloseSection();
- /* Notify the debugger */
- __jit_debug_descriptor.relevant_entry = jit_symbols;
- __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
- __jit_debug_register_code();
-}
+ elfBuilder.OpenSection(".debug_pubtypes", SHT_PROGBITS, 0);
+ elfBuilder.Append(dbgPubType.MemPtr, dbgPubType.MemSize);
+ elfBuilder.CloseSection();
-void NotifyGdb::MethodDropped(MethodDesc* methodDescPtr)
-{
- static const int textSectionIndex = GetSectionIndex(".text");
+ elfBuilder.OpenSection(".debug_line", SHT_PROGBITS, 0);
+ elfBuilder.Append(dbgLine.MemPtr, dbgLine.MemSize);
+ elfBuilder.CloseSection();
- if (textSectionIndex < 0)
- return;
+ Elf_SectionTracker *strtab = elfBuilder.OpenSection(".strtab", SHT_STRTAB, 0);
+ elfBuilder.Append(sectStrTab.MemPtr, sectStrTab.MemSize);
+ elfBuilder.CloseSection();
+
+ Elf_SectionTracker *symtab = elfBuilder.OpenSection(".symtab", SHT_SYMTAB, 0);
+ elfBuilder.Append(sectSymTab.MemPtr, sectSymTab.MemSize);
+ symtab->Header()->sh_link = strtab->GetIndex();
+ symtab->Header()->sh_entsize = sizeof(Elf_Sym);
+ elfBuilder.CloseSection();
+
+ return true;
+}
+
+void NotifyGdb::MethodPitched(MethodDesc* methodDescPtr)
+{
PCODE pCode = methodDescPtr->GetNativeCode();
if (pCode == NULL)
@@ -2173,7 +2870,7 @@ void NotifyGdb::MethodDropped(MethodDesc* methodDescPtr)
const Elf_Ehdr* pEhdr = reinterpret_cast<const Elf_Ehdr*>(ptr);
const Elf_Shdr* pShdr = reinterpret_cast<const Elf_Shdr*>(ptr + pEhdr->e_shoff);
- pShdr += textSectionIndex; // bump to .text section
+ pShdr += ELF_BUILDER_TEXT_SECTION_INDEX; // bump to .text section
if (pShdr->sh_addr == pCode)
{
/* Notify the debugger */
@@ -2665,11 +3362,10 @@ bool NotifyGdb::BuildStringTableSection(MemBuf& buf, NewArrayHolder<Elf_Symbol>
}
/* Build ELF .symtab section */
-bool NotifyGdb::BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize, FunctionMemberPtrArrayHolder &method,
- NewArrayHolder<Elf_Symbol> &symbolNames, int symbolCount)
+bool NotifyGdb::BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize, int methodCount,
+ NewArrayHolder<Elf_Symbol> &symbolNames, int symbolCount,
+ unsigned int thunkIndexBase)
{
- static const int textSectionIndex = GetSectionIndex(".text");
-
buf.MemSize = symbolCount * sizeof(Elf_Sym);
buf.MemPtr = new char[buf.MemSize];
@@ -2682,22 +3378,22 @@ bool NotifyGdb::BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize,
sym[0].st_size = 0;
sym[0].st_shndx = SHN_UNDEF;
- for (int i = 1; i < 1 + method.GetCount(); ++i)
+ for (int i = 1; i < 1 + methodCount; ++i)
{
sym[i].st_name = symbolNames[i].m_off;
sym[i].setBindingAndType(STB_GLOBAL, STT_FUNC);
sym[i].st_other = 0;
sym[i].st_value = PINSTRToPCODE(symbolNames[i].m_value - addr);
- sym[i].st_shndx = textSectionIndex;
+ sym[i].st_shndx = ELF_BUILDER_TEXT_SECTION_INDEX;
sym[i].st_size = symbolNames[i].m_size;
}
- for (int i = 1 + method.GetCount(); i < symbolCount; ++i)
+ for (int i = 1 + methodCount; i < symbolCount; ++i)
{
sym[i].st_name = symbolNames[i].m_off;
sym[i].setBindingAndType(STB_GLOBAL, STT_FUNC);
sym[i].st_other = 0;
- sym[i].st_shndx = SectionNamesCount + (i - (1 + method.GetCount())); // .thunks section index
+ sym[i].st_shndx = thunkIndexBase + (i - (1 + methodCount)); // .thunks section index
sym[i].st_size = 8;
#ifdef _TARGET_ARM_
sym[i].st_value = 1; // for THUMB code
@@ -2708,97 +3404,6 @@ bool NotifyGdb::BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize,
return true;
}
-int NotifyGdb::GetSectionIndex(const char *sectName)
-{
- for (int i = 0; i < SectionNamesCount; ++i)
- if (strcmp(SectionNames[i], sectName) == 0)
- return i;
- return -1;
-}
-
-/* Build the ELF section headers table and section names table */
-void NotifyGdb::BuildSectionTables(MemBuf& sectBuf, MemBuf& strBuf, FunctionMemberPtrArrayHolder &method,
- int symbolCount)
-{
- static const int symtabSectionIndex = GetSectionIndex(".symtab");
- static const int nullSectionIndex = GetSectionIndex("");
-
- const int thunks_count = symbolCount - 1 - method.GetCount();
-
- // Approximate length of single section name.
- // Used only to reduce memory reallocations.
- static const int SECT_NAME_LENGTH = 11;
-
- strBuf.Resize(SECT_NAME_LENGTH * (SectionNamesCount + thunks_count));
-
- Elf_Shdr* sectionHeaders = new Elf_Shdr[SectionNamesCount + thunks_count];
- sectBuf.MemPtr = reinterpret_cast<char*>(sectionHeaders);
- sectBuf.MemSize = sizeof(Elf_Shdr) * (SectionNamesCount + thunks_count);
-
- Elf_Shdr* pSh = sectionHeaders;
- uint32_t sectNameOffset = 0;
-
- // Additional memory for remaining section names,
- // grows twice on each reallocation.
- int addSize = SECT_NAME_LENGTH;
-
- // Fill section headers and names
- for (int i = 0; i < SectionNamesCount + thunks_count; ++i, ++pSh)
- {
- char thunkSectNameBuf[256]; // temporary buffer for .thunk_# section name
- const char *sectName;
-
- bool isThunkSection = i >= SectionNamesCount;
- if (isThunkSection)
- {
- sprintf_s(thunkSectNameBuf, _countof(thunkSectNameBuf), ".thunk_%i", i);
- sectName = thunkSectNameBuf;
- }
- else
- {
- sectName = SectionNames[i];
- }
-
- // Ensure that there is enough memory for section name,
- // reallocate if necessary.
- pSh->sh_name = sectNameOffset;
- sectNameOffset += strlen(sectName) + 1;
- if (sectNameOffset > strBuf.MemSize)
- {
- // Allocate more memory for remaining section names
- strBuf.Resize(sectNameOffset + addSize);
- addSize *= 2;
- }
-
- strcpy(strBuf.MemPtr + pSh->sh_name, sectName);
-
- // All .thunk_* sections have the same type and flags
- int index = isThunkSection ? SectionNamesCount : i;
- pSh->sh_type = Sections[index].m_type;
- pSh->sh_flags = Sections[index].m_flags;
-
- pSh->sh_addr = 0;
- pSh->sh_offset = 0;
- pSh->sh_size = 0;
- pSh->sh_link = SHN_UNDEF;
- pSh->sh_info = 0;
- pSh->sh_addralign = i == nullSectionIndex ? 0 : 1;
- pSh->sh_entsize = i == symtabSectionIndex ? sizeof(Elf_Sym) : 0;
- }
-
- // Set actual used size to avoid garbage in ELF section
- strBuf.MemSize = sectNameOffset;
-}
-
-/* Build the ELF header */
-bool NotifyGdb::BuildELFHeader(MemBuf& buf)
-{
- Elf_Ehdr* header = new Elf_Ehdr;
- buf.MemPtr = reinterpret_cast<char*>(header);
- buf.MemSize = sizeof(Elf_Ehdr);
- return true;
-}
-
/* Split full path name into directory & file names */
void NotifyGdb::SplitPathname(const char* path, const char*& pathName, const char*& fileName)
{
@@ -2817,19 +3422,6 @@ void NotifyGdb::SplitPathname(const char* path, const char*& pathName, const cha
}
}
-#ifdef _DEBUG
-void NotifyGdb::DumpElf(const char* methodName, const MemBuf& elfFile)
-{
- char dump[1024];
- strcpy(dump, "./");
- strcat(dump, methodName);
- strcat(dump, ".o");
- FILE *f = fopen(dump, "wb");
- fwrite(elfFile.MemPtr, sizeof(char),elfFile.MemSize, f);
- fclose(f);
-}
-#endif
-
/* ELF 32bit header */
Elf32_Ehdr::Elf32_Ehdr()
{
diff --git a/src/vm/gdbjit.h b/src/vm/gdbjit.h
index 6bde3f27ba..f7267ad2a1 100644
--- a/src/vm/gdbjit.h
+++ b/src/vm/gdbjit.h
@@ -121,12 +121,21 @@ struct SymbolsInfo
class DwarfDumpable
{
public:
+ DwarfDumpable() :
+ m_base_ptr(nullptr),
+ m_is_visited(false)
+ {
+ }
+
// writes all string literals this type needs to ptr
virtual void DumpStrings(char* ptr, int& offset) = 0;
virtual void DumpDebugInfo(char* ptr, int& offset) = 0;
virtual ~DwarfDumpable() {}
+
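+ // DumpDebugInfo runs twice: a sizing pass (ptr == nullptr) and a write pass;
+ // m_base_ptr/m_is_visited ensure each node is emitted exactly once per pass.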
+ char *m_base_ptr;
+ bool m_is_visited;
};
class LocalsInfo
@@ -326,12 +335,13 @@ public:
};
struct Elf_Symbol;
+class Elf_Builder;
class NotifyGdb
{
public:
- static void MethodCompiled(MethodDesc* methodDescPtr);
- static void MethodDropped(MethodDesc* methodDescPtr);
+ static void MethodPrepared(MethodDesc* methodDescPtr);
+ static void MethodPitched(MethodDesc* methodDescPtr);
template <typename PARENT_TRAITS>
class DeleteValuesOnDestructSHashTraits : public PARENT_TRAITS
{
@@ -404,14 +414,19 @@ private:
}
};
- static void OnMethodCompiled(MethodDesc* methodDescPtr);
+ static void OnMethodPrepared(MethodDesc* methodDescPtr);
+
+#ifdef FEATURE_GDBJIT_FRAME
+ static bool EmitFrameInfo(Elf_Builder &, PCODE pCode, TADDR codeSize);
+#endif // FEATURE_GDBJIT_FRAME
+#ifdef FEATURE_GDBJIT_SYMTAB
+ static bool EmitSymtab(Elf_Builder &, MethodDesc* methodDescPtr, PCODE pCode, TADDR codeSize);
+#endif // FEATURE_GDBJIT_SYMTAB
+ static bool EmitDebugInfo(Elf_Builder &, MethodDesc* methodDescPtr, PCODE pCode, TADDR codeSize, const char *szModuleFile);
- static int GetSectionIndex(const char *sectName);
- static bool BuildELFHeader(MemBuf& buf);
- static void BuildSectionTables(MemBuf& sectBuf, MemBuf& strBuf, FunctionMemberPtrArrayHolder &method,
- int symbolCount);
- static bool BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize, FunctionMemberPtrArrayHolder &method,
- NewArrayHolder<Elf_Symbol> &symbolNames, int symbolCount);
+ static bool BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize, int methodCount,
+ NewArrayHolder<Elf_Symbol> &symbolNames, int symbolCount,
+ unsigned int thunkIndexBase);
static bool BuildStringTableSection(MemBuf& strTab, NewArrayHolder<Elf_Symbol> &symbolNames, int symbolCount);
static bool BuildDebugStrings(MemBuf& buf, PTK_TypeInfoMap pTypeMap, FunctionMemberPtrArrayHolder &method);
static bool BuildDebugAbbrev(MemBuf& buf);
@@ -427,9 +442,6 @@ private:
static void SplitPathname(const char* path, const char*& pathName, const char*& fileName);
static bool CollectCalledMethods(CalledMethod* pCM, TADDR nativeCode, FunctionMemberPtrArrayHolder &method,
NewArrayHolder<Elf_Symbol> &symbolNames, int &symbolCount);
-#ifdef _DEBUG
- static void DumpElf(const char* methodName, const MemBuf& buf);
-#endif
};
class FunctionMember: public TypeMember
diff --git a/src/vm/genericdict.cpp b/src/vm/genericdict.cpp
index c93e583345..5fad30f4b8 100644
--- a/src/vm/genericdict.cpp
+++ b/src/vm/genericdict.cpp
@@ -742,7 +742,7 @@ Dictionary::PopulateEntry(
}
// MethodTable is expected to be normalized
- _ASSERTE(pDictionary == pMT->GetPerInstInfo()[dictionaryIndex]);
+ _ASSERTE(pDictionary == pMT->GetPerInstInfo()[dictionaryIndex].GetValueMaybeNull());
}
else
{
diff --git a/src/vm/generics.cpp b/src/vm/generics.cpp
index 51e6d7bbac..ed5313263f 100644
--- a/src/vm/generics.cpp
+++ b/src/vm/generics.cpp
@@ -255,7 +255,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
// Bytes are required for the vtable itself
S_SIZE_T safe_cbMT = S_SIZE_T( cbGC ) + S_SIZE_T( sizeof(MethodTable) );
- safe_cbMT += MethodTable::GetNumVtableIndirections(cSlots) * sizeof(PTR_PCODE);
+ safe_cbMT += MethodTable::GetNumVtableIndirections(cSlots) * sizeof(MethodTable::VTableIndir_t);
if (safe_cbMT.IsOverflow())
{
ThrowHR(COR_E_OVERFLOW);
@@ -364,7 +364,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
pMT->ClearFlag(MethodTable::enum_flag_IsZapped);
pMT->ClearFlag(MethodTable::enum_flag_IsPreRestored);
- pMT->ClearFlag(MethodTable::enum_flag_HasIndirectParent);
+ pMT->m_pParentMethodTable.SetValueMaybeNull(NULL);
// No non-virtual slots
pMT->ClearFlag(MethodTable::enum_flag_HasSingleNonVirtualSlot);
@@ -440,7 +440,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
if (canShareVtableChunks)
{
// Share the canonical chunk
- it.SetIndirectionSlot(pOldMT->GetVtableIndirections()[it.GetIndex()]);
+ it.SetIndirectionSlot(pOldMT->GetVtableIndirections()[it.GetIndex()].GetValueMaybeNull());
}
else
{
@@ -499,7 +499,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
_ASSERTE(pOldMT->HasPerInstInfo());
// Fill in per-inst map pointer (which points to the array of generic dictionary pointers)
- pMT->SetPerInstInfo ((Dictionary**) (pMemory + cbMT + cbOptional + cbIMap + sizeof(GenericsDictInfo)));
+ pMT->SetPerInstInfo((MethodTable::PerInstInfoElem_t *) (pMemory + cbMT + cbOptional + cbIMap + sizeof(GenericsDictInfo)));
_ASSERTE(FitsIn<WORD>(pOldMT->GetNumDicts()));
_ASSERTE(FitsIn<WORD>(pOldMT->GetNumGenericArgs()));
pMT->SetDictInfo(static_cast<WORD>(pOldMT->GetNumDicts()), static_cast<WORD>(pOldMT->GetNumGenericArgs()));
@@ -508,7 +508,8 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
// The others are filled in by LoadExactParents which copied down any inherited generic
// dictionary pointers.
Dictionary * pDict = (Dictionary*) (pMemory + cbMT + cbOptional + cbIMap + cbPerInst);
- *(pMT->GetPerInstInfo() + (pOldMT->GetNumDicts()-1)) = pDict;
+ MethodTable::PerInstInfoElem_t *pPInstInfo = (MethodTable::PerInstInfoElem_t *) (pMT->GetPerInstInfo() + (pOldMT->GetNumDicts()-1));
+ pPInstInfo->SetValueMaybeNull(pDict);
// Fill in the instantiation section of the generic dictionary. The remainder of the
// generic dictionary will be zeroed, which is the correct initial state.
diff --git a/src/vm/genmeth.cpp b/src/vm/genmeth.cpp
index dc55221308..dd8e3283cc 100644
--- a/src/vm/genmeth.cpp
+++ b/src/vm/genmeth.cpp
@@ -120,34 +120,6 @@ static MethodDesc* CreateMethodDesc(LoaderAllocator *pAllocator,
{
pMD->SetSynchronized();
}
- if (pTemplateMD->RequiresLinktimeCheck())
- {
- pMD->SetRequiresLinktimeCheck();
- }
- if (pTemplateMD->RequiresInheritanceCheck())
- {
- pMD->SetRequiresInheritanceCheck();
- }
- if (pTemplateMD->ParentRequiresInheritanceCheck())
- {
- pMD->SetParentRequiresInheritanceCheck();
- }
- if (pTemplateMD->IsInterceptedForDeclSecurity())
- {
- pMD->SetInterceptedForDeclSecurity();
- }
- if (pTemplateMD->IsInterceptedForDeclSecurityCASDemandsOnly())
- {
- pMD->SetInterceptedForDeclSecurityCASDemandsOnly();
- }
- if (pTemplateMD->HasCriticalTransparentInfo())
- {
- pMD->SetCriticalTransparentInfo(pTemplateMD->IsCritical(), pTemplateMD->IsTreatAsSafe());
- }
- if (pTemplateMD->RequiresLinkTimeCheckHostProtectionOnly())
- {
- pMD->SetRequiresLinkTimeCheckHostProtectionOnly();
- }
pMD->SetMemberDef(token);
pMD->SetSlot(pTemplateMD->GetSlot());
diff --git a/src/vm/hosting.cpp b/src/vm/hosting.cpp
index d47bc28238..035fff8812 100644
--- a/src/vm/hosting.cpp
+++ b/src/vm/hosting.cpp
@@ -480,12 +480,15 @@ BOOL EEHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem)
#ifdef _DEBUG
GlobalAllocStore::RemoveAlloc (lpMem);
- // Check the heap handle to detect heap contamination
- lpMem = (BYTE*)lpMem - OS_HEAP_ALIGN;
- HANDLE storedHeapHandle = *((HANDLE*)lpMem);
- if(storedHeapHandle != hHeap)
- _ASSERTE(!"Heap contamination detected! HeapFree was called on a heap other than the one that memory was allocated from.\n"
- "Possible cause: you used new (executable) to allocate the memory, but didn't use DeleteExecutable() to free it.");
+ if (lpMem != NULL)
+ {
+ // Check the heap handle to detect heap contamination
+ lpMem = (BYTE*)lpMem - OS_HEAP_ALIGN;
+ HANDLE storedHeapHandle = *((HANDLE*)lpMem);
+ if(storedHeapHandle != hHeap)
+ _ASSERTE(!"Heap contamination detected! HeapFree was called on a heap other than the one that memory was allocated from.\n"
+ "Possible cause: you used new (executable) to allocate the memory, but didn't use DeleteExecutable() to free it.");
+ }
#endif
// DON'T REMOVE THIS SEEMINGLY USELESS CAST
//
diff --git a/src/vm/i386/cgencpu.h b/src/vm/i386/cgencpu.h
index ff76d992fc..e4a623b715 100644
--- a/src/vm/i386/cgencpu.h
+++ b/src/vm/i386/cgencpu.h
@@ -504,6 +504,7 @@ struct DECLSPEC_ALIGN(4) UMEntryThunkCode
const BYTE * m_execstub; // pointer to destination code // make sure the backpatched portion is dword aligned.
void Encode(BYTE* pTargetCode, void* pvSecretParam);
+ void Poison();
LPCBYTE GetEntryPoint() const
{
diff --git a/src/vm/i386/cgenx86.cpp b/src/vm/i386/cgenx86.cpp
index c75490babd..9b8960a6eb 100644
--- a/src/vm/i386/cgenx86.cpp
+++ b/src/vm/i386/cgenx86.cpp
@@ -19,7 +19,6 @@
#include "dllimport.h"
#include "comdelegate.h"
#include "log.h"
-#include "security.h"
#include "comdelegate.h"
#include "array.h"
#include "jitinterface.h"
@@ -1588,6 +1587,13 @@ void UMEntryThunkCode::Encode(BYTE* pTargetCode, void* pvSecretParam)
FlushInstructionCache(GetCurrentProcess(),GetEntryPoint(),sizeof(UMEntryThunkCode));
}
+void UMEntryThunkCode::Poison()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ m_movEAX = X86_INSTR_INT3;
+}
+
UMEntryThunk* UMEntryThunk::Decode(LPVOID pCallback)
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/i386/stublinkerx86.cpp b/src/vm/i386/stublinkerx86.cpp
index 1ad4636a8f..b77609822b 100644
--- a/src/vm/i386/stublinkerx86.cpp
+++ b/src/vm/i386/stublinkerx86.cpp
@@ -21,7 +21,6 @@
#include "excep.h"
#include "dllimport.h"
#include "log.h"
-#include "security.h"
#include "comdelegate.h"
#include "array.h"
#include "jitinterface.h"
@@ -6720,7 +6719,7 @@ BOOL FixupPrecode::SetTargetInterlocked(TADDR target, TADDR expected)
}
else if (pOldValue[OFFSETOF_PRECODE_TYPE_CALL_OR_JMP] == FixupPrecode::Type)
{
-#ifdef FEATURE_TIERED_COMPILATION
+#ifdef FEATURE_CODE_VERSIONING
// No change needed, jmp is already in place
#else
// Setting the target more than once is unexpected
diff --git a/src/vm/ilinstrumentation.cpp b/src/vm/ilinstrumentation.cpp
new file mode 100644
index 0000000000..a2bdbf1a60
--- /dev/null
+++ b/src/vm/ilinstrumentation.cpp
@@ -0,0 +1,90 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: ILInstrumentation.cpp
+//
+// ===========================================================================
+
+
+#include "common.h"
+#include "ilinstrumentation.h"
+
+
+//---------------------------------------------------------------------------------------
+InstrumentedILOffsetMapping::InstrumentedILOffsetMapping()
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_cMap = 0;
+ m_rgMap = NULL;
+ _ASSERTE(IsNull());
+}
+
+//---------------------------------------------------------------------------------------
+//
+// Check whether there is any mapping information stored in this object.
+//
+// Notes:
+// The mapping memory must stay alive until the Module containing
+// the instrumented method is destructed.
+//
+
+BOOL InstrumentedILOffsetMapping::IsNull() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return (m_cMap == 0);
+}
+
+#if !defined(DACCESS_COMPILE)
+//---------------------------------------------------------------------------------------
+//
+// Release the memory used by the array of COR_IL_MAPs.
+//
+// Notes:
+// * The memory must stay alive until the Module containing
+// the instrumented method is destructed.
+// * This struct should be read-only in DAC builds.
+//
+
+void InstrumentedILOffsetMapping::Clear()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (m_rgMap != NULL)
+ {
+ delete[] m_rgMap;
+ }
+
+ m_cMap = 0;
+ m_rgMap = NULL;
+}
+#endif // !DACCESS_COMPILE
+
+#if !defined(DACCESS_COMPILE)
+void InstrumentedILOffsetMapping::SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap)
+{
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE((cMap == 0) == (rgMap == NULL));
+ m_cMap = cMap;
+ m_rgMap = ARRAY_PTR_COR_IL_MAP(rgMap);
+}
+#endif // !DACCESS_COMPILE
+
+SIZE_T InstrumentedILOffsetMapping::GetCount() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return m_cMap;
+}
+
+ARRAY_PTR_COR_IL_MAP InstrumentedILOffsetMapping::GetOffsets() const
+{
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE((m_cMap == 0) == (m_rgMap == NULL));
+ return m_rgMap;
+}
diff --git a/src/vm/ilinstrumentation.h b/src/vm/ilinstrumentation.h
new file mode 100644
index 0000000000..cc486ede3f
--- /dev/null
+++ b/src/vm/ilinstrumentation.h
@@ -0,0 +1,116 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+// ===========================================================================
+// File: ILInstrumentation.h
+//
+// ===========================================================================
+
+
+
+#ifndef IL_INSTRUMENTATION_H
+#define IL_INSTRUMENTATION_H
+
+// declare an array type of COR_IL_MAP entries
+typedef ArrayDPTR(COR_IL_MAP) ARRAY_PTR_COR_IL_MAP;
+
+//---------------------------------------------------------------------------------------
+//
+// A profiler may instrument a method by changing the IL. This is typically done when the profiler receives
+// a JITCompilationStarted notification. The profiler also has the option to provide the runtime with
+// a mapping between original IL offsets and instrumented IL offsets. This struct is a simple container
+// for storing the mapping information. We store the mapping information on the Module class, where it can
+// be accessed by the debugger from out-of-process.
+//
+
+class InstrumentedILOffsetMapping
+{
+public:
+ InstrumentedILOffsetMapping();
+
+ // Check whether there is any mapping information stored in this object.
+ BOOL IsNull() const;
+
+#if !defined(DACCESS_COMPILE)
+ // Release the memory used by the array of COR_IL_MAPs.
+ void Clear();
+
+ void SetMappingInfo(SIZE_T cMap, COR_IL_MAP * rgMap);
+#endif // !DACCESS_COMPILE
+
+ SIZE_T GetCount() const;
+ ARRAY_PTR_COR_IL_MAP GetOffsets() const;
+
+private:
+ SIZE_T m_cMap; // the number of elements in m_rgMap
+ ARRAY_PTR_COR_IL_MAP m_rgMap; // an array of COR_IL_MAPs
+};
+
+//---------------------------------------------------------------------------------------
+//
+// Hash table entry for storing InstrumentedILOffsetMapping. This is keyed by the MethodDef token.
+//
+
+struct ILOffsetMappingEntry
+{
+ ILOffsetMappingEntry()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_methodToken = mdMethodDefNil;
+ // No need to initialize m_mapping. The default ctor of InstrumentedILOffsetMapping does the job.
+ }
+
+ ILOffsetMappingEntry(mdMethodDef token, InstrumentedILOffsetMapping mapping)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ m_methodToken = token;
+ m_mapping = mapping;
+ }
+
+ mdMethodDef m_methodToken;
+ InstrumentedILOffsetMapping m_mapping;
+};
+
+//---------------------------------------------------------------------------------------
+//
+// This class is used to create the hash table for the instrumented IL offset mapping.
+// It encapsulates the desired behaviour of the templated hash table and implements
+// the various functions needed by the hash table.
+//
+
+class ILOffsetMappingTraits : public NoRemoveSHashTraits<DefaultSHashTraits<ILOffsetMappingEntry> >
+{
+public:
+ typedef mdMethodDef key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return e.m_methodToken;
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (k1 == k2);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (count_t)(size_t)k;
+ }
+ static const element_t Null()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ ILOffsetMappingEntry e;
+ return e;
+ }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_DAC_CONTRACT; return e.m_methodToken == mdMethodDefNil; }
+};
+
+// Hash table of profiler-provided instrumented IL offset mapping, keyed by the MethodDef token
+typedef SHash<ILOffsetMappingTraits> ILOffsetMappingTable;
+typedef DPTR(ILOffsetMappingTable) PTR_ILOffsetMappingTable;
+
+#endif // IL_INSTRUMENTATION_H
diff --git a/src/vm/interpreter.cpp b/src/vm/interpreter.cpp
index 6901c9c2cd..d18eede1f1 100644
--- a/src/vm/interpreter.cpp
+++ b/src/vm/interpreter.cpp
@@ -38,7 +38,6 @@ static CorInfoType asCorInfoType(CORINFO_CLASS_HANDLE clsHnd)
InterpreterMethodInfo::InterpreterMethodInfo(CEEInfo* comp, CORINFO_METHOD_INFO* methInfo)
: m_method(methInfo->ftn),
m_module(methInfo->scope),
- m_jittedCode(0),
m_ILCode(methInfo->ILCode),
m_ILCodeEnd(methInfo->ILCode + methInfo->ILCodeSize),
m_maxStack(methInfo->maxStack),
@@ -798,12 +797,6 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
// So the structure of the code will look like this (in the non-ILstub case):
//
#if defined(_X86_) || defined(_AMD64_)
- // First do "short-circuiting" if the method has JITted code, and we couldn't find/update the call site:
- // eax = &interpMethInfo
- // eax = [eax + offsetof(m_jittedCode)]
- // if (eax == zero) goto doInterpret:
- // /*else*/ jmp [eax]
- // doInterpret:
// push ebp
// mov ebp, esp
// [if there are register arguments in ecx or edx, push them]
@@ -817,41 +810,6 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
// TODO.
#endif
- // The IL stub case is hard. The portion of the interpreter stub that short-circuits
- // to JITted code requires an extra "scratch" volatile register, not an argument register;
- // in the IL stub case, it too is using such a register, as an extra argument, to hold the stub context.
- // On x86 and ARM, there is only one such extra volatile register, and we've got a conundrum.
- // The cases where this short-circuiting is important is when the address of an interpreter stub
- // becomes "embedded" in other code. The examples I know of are VSD stubs and delegates.
- // The first of these is not a problem for IL stubs -- methods invoked via p/Invoke (the ones that
- // [I think!] use IL stubs) are static, and cannot be invoked via VSD stubs. Delegates, on the other
- // remain a problem [I believe].
- // For the short term, we'll ignore this issue, and never do short-circuiting for IL stubs.
- // So interpreter stubs embedded in delegates will continue to interpret the IL stub, even after
- // the stub has been JITted.
- // The long-term intention is that when we JIT a method with an interpreter stub, we keep a mapping
- // from interpreter stub address to corresponding native code address. If this mapping is non-empty,
- // at GC time we would visit the locations in which interpreter stub addresses might be located, like
- // VSD stubs and delegate objects, and update them to point to new addresses. This would be a necessary
- // part of any scheme to GC interpreter stubs, and InterpreterMethodInfos.
-
- // If we *really* wanted to make short-circuiting work for the IL stub case, we would have to
- // (in the x86 case, which should be sufficiently illustrative):
- // push eax
- // <get the address of JITted code, if any, into eax>
- // if there is JITted code in eax, we'd have to
- // push 2 non-volatile registers, say esi and edi.
- // copy the JITted code address from eax into esi.
- // copy the method arguments (without the return address) down the stack, using edi
- // as a scratch register.
- // restore the original stub context value into eax from the stack
- // call (not jmp) to the JITted code address in esi
- // pop esi and edi from the stack.
- // now the stack has original args, followed by original return address. Do a "ret"
- // that returns to the return address, and also pops the original args from the stack.
- // If we did this, we'd have to give this portion of the stub proper unwind info.
- // Also, we'd have to adjust the rest of the stub to pop eax from the stack.
-
 // TODO: much of the interpreter stub code should be shareable. In the non-IL stub case,
// at least, we could have a small per-method stub that puts the address of the method-specific
// InterpreterMethodInfo into eax, and then branches to a shared part. Probably we would want to
@@ -868,24 +826,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
{
sl.Init();
#if defined(_X86_) || defined(_AMD64_)
- // First we do "short-circuiting" if the method has JITted code.
-#if INTERP_ILSTUBS
- if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
-#endif
- {
- // First read the m_jittedCode field.
- sl.X86EmitRegLoad(kEAX, UINT_PTR(interpMethInfo));
- sl.X86EmitOffsetModRM(0x8b, kEAX, kEAX, offsetof(InterpreterMethodInfo, m_jittedCode));
- // If it is still zero, then go on to do the interpretation.
- sl.X86EmitCmpRegImm32(kEAX, 0);
- CodeLabel* doInterpret = sl.NewCodeLabel();
- sl.X86EmitCondJump(doInterpret, X86CondCode::kJE);
- // Otherwise...
- sl.X86EmitJumpReg(kEAX); // tail call to JITted code.
- sl.EmitLabel(doInterpret);
- }
#if defined(_X86_)
- // Start regular interpretation
sl.X86EmitPushReg(kEBP);
sl.X86EmitMovRegReg(kEBP, static_cast<X86Reg>(kESP_Unsafe));
#endif
@@ -895,43 +836,10 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
ThumbReg r11 = ThumbReg(11);
ThumbReg r12 = ThumbReg(12);
-#if INTERP_ILSTUBS
- if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
-#endif
- {
- // But we also have to use r4, because ThumbEmitCondRegJump below requires a low register.
- sl.ThumbEmitMovConstant(r11, 0);
- sl.ThumbEmitMovConstant(r12, UINT_PTR(interpMethInfo));
- sl.ThumbEmitLoadRegIndirect(r12, r12, offsetof(InterpreterMethodInfo, m_jittedCode));
- sl.ThumbEmitCmpReg(r12, r11); // Set condition codes.
- // If r12 is zero, then go on to do the interpretation.
- CodeLabel* doInterpret = sl.NewCodeLabel();
- sl.ThumbEmitCondFlagJump(doInterpret, thumbCondEq.cond);
- sl.ThumbEmitJumpRegister(r12); // If non-zero, tail call to JITted code.
- sl.EmitLabel(doInterpret);
- }
-
- // Start regular interpretation
-
#elif defined(_ARM64_)
// x8 through x15 are scratch registers on ARM64.
IntReg x8 = IntReg(8);
IntReg x9 = IntReg(9);
-
-#if INTERP_ILSTUBS
- if (!pMD->IsILStub()) // As discussed above, we don't do short-circuiting for IL stubs.
-#endif
- {
- sl.EmitMovConstant(x8, UINT64(interpMethInfo));
- sl.EmitLoadStoreRegImm(StubLinkerCPU::eLOAD, x9, x8, offsetof(InterpreterMethodInfo, m_jittedCode));
- sl.EmitCmpImm(x9, 0);
- CodeLabel* doInterpret = sl.NewCodeLabel();
- sl.EmitCondFlagJump(doInterpret, CondEq.cond);
- sl.EmitJumpRegister(x9);
- sl.EmitLabel(doInterpret);
- }
-
- // Start regular interpretation
#else
#error unsupported platform
#endif
@@ -1749,8 +1657,16 @@ void Interpreter::JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo,
md->GetMDImport(),
&status);
}
- PCODE res = md->MakeJitWorker(pDecoder, jitFlags);
- interpMethInfo->m_jittedCode = res;
+ // This used to be a synchronous JIT and could be made so again if desired,
+ // but measurements on an example scenario (ASP.NET MusicStore) showed better
+ // performance when the JIT runs asynchronously. Given the not-on-by-default
+ // nature of the interpreter I didn't wring my hands too much trying to
+ // determine the ideal policy.
+#ifdef FEATURE_TIERED_COMPILATION
+ GetAppDomain()->GetTieredCompilationManager()->AsyncPromoteMethodToTier1(md);
+#else
+#error FEATURE_INTERPRETER depends on FEATURE_TIERED_COMPILATION now
+#endif
}
}
}
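
The replacement for the deleted back-patching is visible above: instead of writing the JITted entry point into m_jittedCode, the interpreter now defers to tiered compilation. A sketch of the resulting control flow, mirroring the one call shown in the hunk and assuming nothing beyond it:

    // Sketch only: mirrors the promotion call above; no new APIs assumed.
    void PromoteInterpretedMethod(MethodDesc *pMD)
    {
    #ifdef FEATURE_TIERED_COMPILATION
        // Queues a background JIT and returns immediately. The promoted code is
        // published through the normal entry-point update path, so no interpreter
        // stub needs to be back-patched.
        GetAppDomain()->GetTieredCompilationManager()->AsyncPromoteMethodToTier1(pMD);
    #endif
    }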
@@ -10294,7 +10210,7 @@ void Interpreter::CallI()
MethodDesc* pMD;
if (mSig.HasThis())
{
- pMD = g_pObjectCtorMD;
+ pMD = g_pObjectFinalizerMD;
}
else
{
diff --git a/src/vm/interpreter.h b/src/vm/interpreter.h
index dc7638ca7d..fd4a68bea3 100644
--- a/src/vm/interpreter.h
+++ b/src/vm/interpreter.h
@@ -552,9 +552,6 @@ struct InterpreterMethodInfo
// The module containing the method.
CORINFO_MODULE_HANDLE m_module;
- // If the method has been JITted, it's JITted code (for indirection).
- PCODE m_jittedCode;
-
// Code pointer, size, and max stack usage.
BYTE* m_ILCode;
BYTE* m_ILCodeEnd; // One byte past the last byte of IL. IL Code Size = m_ILCodeEnd - m_ILCode.
diff --git a/src/vm/invokeutil.cpp b/src/vm/invokeutil.cpp
index 9efc84d711..4c1dd4d203 100644
--- a/src/vm/invokeutil.cpp
+++ b/src/vm/invokeutil.cpp
@@ -18,7 +18,6 @@
#include "method.hpp"
#include "threads.h"
#include "excep.h"
-#include "security.h"
#include "field.h"
#include "customattribute.h"
#include "eeconfig.h"
@@ -601,11 +600,9 @@ void InvokeUtil::ValidField(TypeHandle th, OBJECTREF* value)
if (!srcTH.CanCastTo(th))
COMPlusThrow(kArgumentException,W("Arg_ObjObj"));
}
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
return;
}
else if (MscorlibBinder::IsClass((*value)->GetMethodTable(), CLASS__INTPTR)) {
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
return;
}
diff --git a/src/vm/invokeutil.h b/src/vm/invokeutil.h
index cfa1a0e96b..ec8114f76a 100644
--- a/src/vm/invokeutil.h
+++ b/src/vm/invokeutil.h
@@ -66,14 +66,6 @@ public:
virtual MethodDesc* GetCallerMethod();
virtual Assembly* GetCallerAssembly();
virtual bool IsCalledFromInterop();
-
- // The caller will be computed lazily by the reflection system.
- virtual bool IsCallerCritical()
- {
- LIMITED_METHOD_CONTRACT;
-
- return false;
- }
AccessCheckOptions::AccessCheckType GetAccessCheckType() const
{
diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
index bfb2b34565..32be77823c 100644
--- a/src/vm/jithelpers.cpp
+++ b/src/vm/jithelpers.cpp
@@ -20,7 +20,6 @@
#include "excep.h"
#include "float.h" // for isnan
#include "dbginterface.h"
-#include "security.h"
#include "dllimport.h"
#include "gcheaputilities.h"
#include "comdelegate.h"
@@ -46,7 +45,6 @@
#include "genericdict.h"
#include "array.h"
#include "debuginfostore.h"
-#include "security.h"
#include "safemath.h"
#include "threadstatics.h"
@@ -2398,7 +2396,7 @@ HCIMPL2(Object*, JIT_ChkCastClass_Portable, MethodTable* pTargetMT, Object* pObj
if (pMT == pTargetMT)
return pObject;
- pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
+ pMT = MethodTable::GetParentMethodTable(pMT);
} while (pMT);
ENDFORBIDGC();
@@ -2418,14 +2416,14 @@ HCIMPL2(Object*, JIT_ChkCastClassSpecial_Portable, MethodTable* pTargetMT, Objec
PRECONDITION(pObject->GetMethodTable() != pTargetMT);
} CONTRACTL_END;
- PTR_VOID pMT = MethodTable::GetParentMethodTableOrIndirection(pObject->GetMethodTable());
+ PTR_VOID pMT = MethodTable::GetParentMethodTable(pObject->GetMethodTable());
while (pMT)
{
if (pMT == pTargetMT)
return pObject;
- pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
+ pMT = MethodTable::GetParentMethodTable(pMT);
}
ENDFORBIDGC();
@@ -2452,7 +2450,7 @@ HCIMPL2(Object*, JIT_IsInstanceOfClass_Portable, MethodTable* pTargetMT, Object*
if (pMT == pTargetMT)
return pObject;
- pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
+ pMT = MethodTable::GetParentMethodTable(pMT);
} while (pMT);
if (!pObject->GetMethodTable()->HasTypeEquivalence())
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index ecabc89ba7..d960394e12 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -24,7 +24,6 @@
#include "excep.h"
#include "float.h" // for isnan
#include "dbginterface.h"
-#include "security.h"
#include "dllimport.h"
#include "gcheaputilities.h"
#include "comdelegate.h"
@@ -47,7 +46,6 @@
#include "genericdict.h"
#include "array.h"
#include "debuginfostore.h"
-#include "security.h"
#include "safemath.h"
#include "runtimehandles.h"
#include "sigbuilder.h"
@@ -68,6 +66,10 @@
#include "interpreter.h"
#endif // FEATURE_INTERPRETER
+#ifdef FEATURE_PERFMAP
+#include "perfmap.h"
+#endif
+
// The Stack Overflow probe takes place in the COOPERATIVE_TRANSITION_BEGIN() macro
//
@@ -1783,9 +1785,7 @@ void CEEInfo::getFieldInfo (CORINFO_RESOLVED_TOKEN * pResolvedToken,
fieldAttribs,
NULL,
(flags & CORINFO_ACCESS_INIT_ARRAY) ? NULL : pField, // For InitializeArray, we don't need tocheck the type of the field.
- accessCheckOptions,
- FALSE /*checkTargetMethodTransparency*/,
- TRUE /*checkTargetTypeTransparency*/);
+ accessCheckOptions);
if (!canAccess)
{
@@ -1924,14 +1924,6 @@ CEEInfo::findCallSiteSig(
if (TypeFromToken(sigMethTok) == mdtMemberRef)
{
IfFailThrow(module->GetMDImport()->GetNameAndSigOfMemberRef(sigMethTok, &pSig, &cbSig, &szName));
-
- // Defs have already been checked by the loader for validity
- // However refs need to be checked.
- if (!Security::CanSkipVerification(module->GetDomainAssembly()))
- {
- // Can pass 0 for the flags, since it is only used for defs.
- IfFailThrow(validateTokenSig(sigMethTok, pSig, cbSig, 0, module->GetMDImport()));
- }
}
else if (TypeFromToken(sigMethTok) == mdtMethodDef)
{
@@ -3093,6 +3085,7 @@ void CEEInfo::ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entr
pResult->signature = NULL;
pResult->indirectFirstOffset = 0;
+ pResult->indirectSecondOffset = 0;
// Unless we decide otherwise, just do the lookup via a helper function
pResult->indirections = CORINFO_USEHELPER;
@@ -3139,9 +3132,6 @@ void CEEInfo::ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entr
#ifdef FEATURE_READYTORUN_COMPILER
if (IsReadyToRunCompilation())
{
-#if defined(_TARGET_ARM_)
- ThrowHR(E_NOTIMPL); /* TODO - NYI */
-#endif
pResultLookup->lookupKind.runtimeLookupArgs = NULL;
switch (entryKind)
@@ -3307,6 +3297,12 @@ void CEEInfo::ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entr
IfFailThrow(sigptr.GetData(&data));
pResult->offsets[2] = sizeof(TypeHandle) * data;
+ if (MethodTable::IsPerInstInfoRelative())
+ {
+ pResult->indirectFirstOffset = 1;
+ pResult->indirectSecondOffset = 1;
+ }
+
return;
}
else if (type == ELEMENT_TYPE_GENERICINST &&
@@ -3554,6 +3550,12 @@ NoSpecialCase:
// Next indirect through the dictionary appropriate to this instantiated type
pResult->offsets[1] = sizeof(TypeHandle*) * (pContextMT->GetNumDicts() - 1);
+
+ if (MethodTable::IsPerInstInfoRelative())
+ {
+ pResult->indirectFirstOffset = 1;
+ pResult->indirectSecondOffset = 1;
+ }
}
}
}
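
When MethodTable::IsPerInstInfoRelative() reports true, the per-instantiation dictionary pointers are stored as self-relative deltas, and setting indirectFirstOffset/indirectSecondOffset to 1 tells the JIT to resolve them that way. A sketch of the difference, assuming RelativePointer's usual "slot address plus stored delta" encoding and the runtime's BYTE typedef:

    // Sketch only: how one step of a runtime lookup consumes the flag.
    BYTE *FollowIndirection(BYTE *ptr, size_t offset, bool indirect)
    {
        if (!indirect)
        {
            // Plain pointer: the slot holds the target address.
            return *(BYTE **)(ptr + offset);
        }
        // RelativePointer: the slot holds a delta from its own address.
        BYTE *slot = ptr + offset;
        return slot + *(intptr_t *)slot;
    }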
@@ -5554,9 +5556,7 @@ void CEEInfo::getCallInfo(
pCalleeForSecurity->GetAttrs(),
pCalleeForSecurity,
NULL,
- accessCheckOptions,
- FALSE,
- TRUE
+ accessCheckOptions
);
// If we were allowed access to the exact method, but it is on a type that has a type parameter
@@ -5576,11 +5576,10 @@ void CEEInfo::getCallInfo(
 // No access check is needed for Var, MVar, or FnPtr.
if (pTypeParamMT != NULL)
- canAccessMethod = ClassLoader::CanAccessClassForExtraChecks(&accessContext,
- pTypeParamMT,
- typeParam.GetAssembly(),
- accessCheckOptions,
- TRUE);
+ canAccessMethod = ClassLoader::CanAccessClass(&accessContext,
+ pTypeParamMT,
+ typeParam.GetAssembly(),
+ accessCheckOptions);
}
pResult->accessAllowed = canAccessMethod ? CORINFO_ACCESS_ALLOWED : CORINFO_ACCESS_ILLEGAL;
@@ -6457,6 +6456,48 @@ const char* CEEInfo::getMethodName (CORINFO_METHOD_HANDLE ftnHnd, const char** s
return result;
}
+const char* CEEInfo::getMethodNameFromMetadata(CORINFO_METHOD_HANDLE ftnHnd, const char** className, const char** namespaceName)
+{
+ CONTRACTL {
+ SO_TOLERANT;
+ THROWS;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ } CONTRACTL_END;
+
+ const char* result = NULL;
+ const char* classResult = NULL;
+ const char* namespaceResult = NULL;
+
+ JIT_TO_EE_TRANSITION();
+
+ MethodDesc *ftn = GetMethod(ftnHnd);
+ mdMethodDef token = ftn->GetMemberDef();
+
+ if (!IsNilToken(token))
+ {
+ if (!FAILED(ftn->GetMDImport()->GetNameOfMethodDef(token, &result)))
+ {
+ MethodTable* pMT = ftn->GetMethodTable();
+ classResult = pMT->GetFullyQualifiedNameInfo(&namespaceResult);
+ }
+ }
+
+ if (className != NULL)
+ {
+ *className = classResult;
+ }
+
+ if (namespaceName != NULL)
+ {
+ *namespaceName = namespaceResult;
+ }
+
+ EE_TO_JIT_TRANSITION();
+
+ return result;
+}
+
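A sketch of a JIT-side consumer of the new method, assuming it is surfaced on the JIT's view of the EE interface as the jitinterface.h change below suggests; any of the three names can legitimately come back NULL (for example, on a nil token):

    // Sketch only: formats "Namespace.Class::Method" from the metadata names.
    #include <cstdio>

    void DumpMethodIdentity(ICorJitInfo *pJitInfo, CORINFO_METHOD_HANDLE ftn)
    {
        const char *className = NULL;
        const char *namespaceName = NULL;
        const char *methodName =
            pJitInfo->getMethodNameFromMetadata(ftn, &className, &namespaceName);

        printf("%s.%s::%s\n",
               namespaceName != NULL ? namespaceName : "",
               className != NULL ? className : "",
               methodName != NULL ? methodName : "<unknown>");
    }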
/*********************************************************************/
DWORD CEEInfo::getMethodAttribs (CORINFO_METHOD_HANDLE ftn)
{
@@ -6494,13 +6535,10 @@ DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
if (pMD->IsLCGMethod())
{
-#ifndef CROSSGEN_COMPILE
-#endif // !CROSSGEN_COMPILE
-
return CORINFO_FLG_STATIC | CORINFO_FLG_DONT_INLINE | CORINFO_FLG_NOSECURITYWRAP;
}
- DWORD result = 0;
+ DWORD result = CORINFO_FLG_NOSECURITYWRAP;
// <REVISIT_TODO>@todo: can we git rid of CORINFO_FLG_ stuff and just include cor.h?</REVISIT_TODO>
@@ -6514,6 +6552,8 @@ DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
result |= CORINFO_FLG_SYNCH;
if (pMD->IsFCallOrIntrinsic())
result |= CORINFO_FLG_NOGCCHECK | CORINFO_FLG_INTRINSIC;
+ if (pMD->IsJitIntrinsic())
+ result |= CORINFO_FLG_JIT_INTRINSIC;
if (IsMdVirtual(attribs))
result |= CORINFO_FLG_VIRTUAL;
if (IsMdAbstract(attribs))
@@ -6554,11 +6594,6 @@ DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
result |= CORINFO_FLG_PINVOKE;
}
- if (!pMD->IsInterceptedForDeclSecurity())
- {
- result |= CORINFO_FLG_NOSECURITYWRAP;
- }
-
if (IsMdRequireSecObject(attribs))
{
// Assume all methods marked as DynamicSecurity are
@@ -6640,15 +6675,6 @@ void CEEInfo::setMethodAttribs (
}
}
- // Both CORINFO_FLG_UNVERIFIABLE and CORINFO_FLG_VERIFIABLE cannot be set
- _ASSERTE(!(attribs & CORINFO_FLG_UNVERIFIABLE) ||
- !(attribs & CORINFO_FLG_VERIFIABLE ));
-
- if (attribs & CORINFO_FLG_VERIFIABLE)
- ftn->SetIsVerified(TRUE);
- else if (attribs & CORINFO_FLG_UNVERIFIABLE)
- ftn->SetIsVerified(FALSE);
-
EE_TO_JIT_TRANSITION();
}
@@ -6870,7 +6896,8 @@ bool getILIntrinsicImplementationForUnsafe(MethodDesc * ftn,
methInfo->options = (CorInfoOptions)0;
return true;
}
- else if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__BYREF_AS)->GetMemberDef())
+ else if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__BYREF_AS)->GetMemberDef() ||
+ tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__OBJECT_AS)->GetMemberDef())
{
// Return the argument that was passed in.
static const BYTE ilcode[] = { CEE_LDARG_0, CEE_RET };
@@ -6881,7 +6908,8 @@ bool getILIntrinsicImplementationForUnsafe(MethodDesc * ftn,
methInfo->options = (CorInfoOptions)0;
return true;
}
- else if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__BYREF_ADD)->GetMemberDef())
+ else if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__BYREF_ADD)->GetMemberDef() ||
+ tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__PTR_ADD)->GetMemberDef())
{
mdToken tokGenericArg = FindGenericMethodArgTypeSpec(MscorlibBinder::GetModule()->GetMDImport());
@@ -7204,31 +7232,41 @@ getMethodInfoHelper(
bool fILIntrinsic = false;
MethodTable * pMT = ftn->GetMethodTable();
-
- if (MscorlibBinder::IsClass(pMT, CLASS__JIT_HELPERS))
- {
- fILIntrinsic = getILIntrinsicImplementation(ftn, methInfo);
- }
- else if (MscorlibBinder::IsClass(pMT, CLASS__UNSAFE))
- {
- fILIntrinsic = getILIntrinsicImplementationForUnsafe(ftn, methInfo);
- }
- else if (MscorlibBinder::IsClass(pMT, CLASS__INTERLOCKED))
- {
- fILIntrinsic = getILIntrinsicImplementationForInterlocked(ftn, methInfo);
- }
- else if (MscorlibBinder::IsClass(pMT, CLASS__VOLATILE))
- {
- fILIntrinsic = getILIntrinsicImplementationForVolatile(ftn, methInfo);
- }
- else if (MscorlibBinder::IsClass(pMT, CLASS__RUNTIME_HELPERS))
+
+ if (pMT->GetModule()->IsSystem())
{
- fILIntrinsic = getILIntrinsicImplementationForRuntimeHelpers(ftn, methInfo);
+ if (MscorlibBinder::IsClass(pMT, CLASS__JIT_HELPERS))
+ {
+ fILIntrinsic = getILIntrinsicImplementation(ftn, methInfo);
+ }
+ else if (MscorlibBinder::IsClass(pMT, CLASS__UNSAFE))
+ {
+ fILIntrinsic = getILIntrinsicImplementationForUnsafe(ftn, methInfo);
+ }
+ else if (MscorlibBinder::IsClass(pMT, CLASS__INTERLOCKED))
+ {
+ fILIntrinsic = getILIntrinsicImplementationForInterlocked(ftn, methInfo);
+ }
+ else if (MscorlibBinder::IsClass(pMT, CLASS__VOLATILE))
+ {
+ fILIntrinsic = getILIntrinsicImplementationForVolatile(ftn, methInfo);
+ }
+ else if (MscorlibBinder::IsClass(pMT, CLASS__RUNTIME_HELPERS))
+ {
+ fILIntrinsic = getILIntrinsicImplementationForRuntimeHelpers(ftn, methInfo);
+ }
}
if (!fILIntrinsic)
{
getMethodInfoILMethodHeaderHelper(header, methInfo);
+
+ // Workaround for https://github.com/dotnet/coreclr/issues/1279
+ // Set the init locals bit to zero for the system module unless a profiler may have
+ // overridden it. Remove once we have a better solution for this issue.
+ if (pMT->GetModule()->IsSystem() && !(CORProfilerDisableAllNGenImages() || CORProfilerUseProfileImages()))
+ methInfo->options = (CorInfoOptions)0;
+
pLocalSig = header->LocalVarSig;
cbLocalSig = header->cbLocalVarSig;
}
@@ -7384,12 +7422,6 @@ CEEInfo::getMethodInfo(
else
{
/* Get the IL header */
- /* <REVISIT_TODO>TODO: canInline already did validation, however, we do it again
- here because NGEN uses this function without calling canInline
- It would be nice to avoid this redundancy </REVISIT_TODO>*/
- Module* pModule = ftn->GetModule();
-
- bool verify = !Security::CanSkipVerification(ftn);
if (ftn->IsDynamicMethod())
{
@@ -7397,28 +7429,7 @@ CEEInfo::getMethodInfo(
}
else
{
- COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::SUCCESS;
- COR_ILMETHOD_DECODER header(ftn->GetILHeader(TRUE), ftn->GetMDImport(), verify ? &status : NULL);
-
- // If we get a verification error then we try to demand SkipVerification for the module
- if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR &&
- Security::CanSkipVerification(pModule->GetDomainAssembly()))
- {
- status = COR_ILMETHOD_DECODER::SUCCESS;
- }
-
- if (status != COR_ILMETHOD_DECODER::SUCCESS)
- {
- if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
- {
- // Throw a verification HR
- COMPlusThrowHR(COR_E_VERIFICATION);
- }
- else
- {
- COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
- }
- }
+ COR_ILMETHOD_DECODER header(ftn->GetILHeader(TRUE), ftn->GetMDImport(), NULL);
getMethodInfoHelper(ftn, ftnHnd, &header, methInfo);
}
@@ -7545,25 +7556,6 @@ CorInfoInline CEEInfo::canInline (CORINFO_METHOD_HANDLE hCaller,
Module * pOrigCallerModule;
pOrigCallerModule = pOrigCaller->GetLoaderModule();
- // Prevent recursive compiling/inlining/verifying
- if (pOrigCaller != pCallee)
- {
- // The Inliner may not do code verification.
- // So never inline anything that is unverifiable / bad code.
- if (!Security::CanSkipVerification(pCallee))
- {
- // Inlinee needs to be verifiable
- if (!pCallee->IsVerifiable())
- {
- result = INLINE_NEVER;
- szFailReason = "Inlinee is not verifiable";
- goto exit;
- }
- }
- }
-
- // We check this here as the call to MethodDesc::IsVerifiable()
- // may set CORINFO_FLG_DONT_INLINE.
if (pCallee->IsNotInline())
{
result = INLINE_NEVER;
@@ -7672,64 +7664,10 @@ CorInfoInline CEEInfo::canInline (CORINFO_METHOD_HANDLE hCaller,
{
// #rejit
//
- // See if rejit-specific flags for the caller disable inlining
- if ((ReJitManager::GetCurrentReJitFlags(pCaller) &
- COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
- {
- result = INLINE_FAIL;
- szFailReason = "ReJIT request disabled inlining from caller";
- goto exit;
- }
-
- // If the profiler has set a mask preventing inlining, always return
- // false to the jit.
- if (CORProfilerDisableInlining())
- {
- result = INLINE_FAIL;
- szFailReason = "Profiler disabled inlining globally";
- goto exit;
- }
-
- // If the profiler wishes to be notified of JIT events and the result from
- // the above tests will cause a function to be inlined, we need to tell the
- // profiler that this inlining is going to take place, and give them a
- // chance to prevent it.
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- if (pCaller->IsILStub() || pCallee->IsILStub())
- {
- // do nothing
- }
- else
- {
- BOOL fShouldInline;
-
- HRESULT hr = g_profControlBlock.pProfInterface->JITInlining(
- (FunctionID)pCaller,
- (FunctionID)pCallee,
- &fShouldInline);
-
- if (SUCCEEDED(hr) && !fShouldInline)
- {
- result = INLINE_FAIL;
- szFailReason = "Profiler disabled inlining locally";
- goto exit;
- }
- }
- END_PIN_PROFILER();
- }
- }
-#endif // PROFILING_SUPPORTED
-
-
-#ifdef PROFILING_SUPPORTED
- if (CORProfilerPresent())
- {
- // #rejit
- //
- // See if rejit-specific flags for the caller disable inlining
- if ((ReJitManager::GetCurrentReJitFlags(pCaller) &
- COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
+ // Currently the rejit path is the only path that sets this.
+ // If we get more reasons to set this, we may need to change
+ // the failure reason message or disambiguate them.
+ if (!m_allowInlining)
{
result = INLINE_FAIL;
szFailReason = "ReJIT request disabled inlining from caller";
@@ -8018,8 +7956,7 @@ CorInfoInstantiationVerification
goto exit;
}
- result = pMethod->IsVerifiable() ? INSTVER_GENERIC_PASSED_VERIFICATION
- : INSTVER_GENERIC_FAILED_VERIFICATION;
+ result = INSTVER_GENERIC_PASSED_VERIFICATION;
exit: ;
@@ -8074,16 +8011,6 @@ bool CEEInfo::canTailCall (CORINFO_METHOD_HANDLE hCaller,
goto exit;
}
- // TailCalls will throw off security stackwalking logic when there is a declarative Assert
- // Note that this check will also include declarative demands. It's OK to do a tailcall in
- // those cases, but we currently don't have a way to check only for declarative Asserts.
- if (pCaller->IsInterceptedForDeclSecurity())
- {
- result = false;
- szFailReason = "Caller has declarative security";
- goto exit;
- }
-
if (!fIsTailPrefix)
{
mdMethodDef callerToken = pCaller->GetMemberDef();
@@ -8581,7 +8508,8 @@ CONTRACTL {
/*********************************************************************/
void CEEInfo::getMethodVTableOffset (CORINFO_METHOD_HANDLE methodHnd,
unsigned * pOffsetOfIndirection,
- unsigned * pOffsetAfterIndirection)
+ unsigned * pOffsetAfterIndirection,
+ bool * isRelative)
{
CONTRACTL {
SO_TOLERANT;
@@ -8602,8 +8530,9 @@ void CEEInfo::getMethodVTableOffset (CORINFO_METHOD_HANDLE methodHnd,
// better be in the vtable
_ASSERTE(method->GetSlot() < method->GetMethodTable()->GetNumVirtuals());
- *pOffsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(method->GetSlot()) * sizeof(PTR_PCODE);
+ *pOffsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(method->GetSlot()) * sizeof(MethodTable::VTableIndir_t);
*pOffsetAfterIndirection = MethodTable::GetIndexAfterVtableIndirection(method->GetSlot()) * sizeof(PCODE);
+ *isRelative = MethodTable::VTableIndir_t::isRelative ? 1 : 0;
EE_TO_JIT_TRANSITION_LEAF();
}
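
With the new isRelative out-parameter, the two offsets can no longer always be treated as a plain displacement followed by a load: on targets where VTableIndir_t is a relative pointer, each load yields a self-relative delta instead of an address. A sketch of the consuming logic, assuming both indirection levels share the flag when it is set:

    // Sketch only: resolving a virtual slot from the three out-parameters.
    PCODE GetSlotTarget(BYTE *pMT, unsigned offOfIndir, unsigned offAfterIndir,
                        bool isRelative)
    {
        if (!isRelative)
        {
            BYTE *chunk = *(BYTE **)(pMT + offOfIndir);    // first indirection
            return *(PCODE *)(chunk + offAfterIndir);      // slot in the chunk
        }
        BYTE *slot1 = pMT + offOfIndir;                    // self-relative chunk ptr
        BYTE *chunk = slot1 + *(intptr_t *)slot1;
        BYTE *slot2 = chunk + offAfterIndir;               // self-relative code ptr
        return (PCODE)(slot2 + *(intptr_t *)slot2);
    }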
@@ -9391,7 +9320,6 @@ CorInfoType CEEInfo::getHFAType(CORINFO_CLASS_HANDLE hClass)
CorInfoType result = CORINFO_TYPE_UNDEF;
-#ifdef FEATURE_HFA
JIT_TO_EE_TRANSITION();
TypeHandle VMClsHnd(hClass);
@@ -9399,7 +9327,6 @@ CorInfoType CEEInfo::getHFAType(CORINFO_CLASS_HANDLE hClass)
result = asCorInfoType(VMClsHnd.GetHFAType());
EE_TO_JIT_TRANSITION();
-#endif
return result;
}
@@ -11819,6 +11746,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
#ifdef FEATURE_INTERPRETER
static ConfigDWORD s_InterpreterFallback;
+ bool isInterpreterStub = false;
bool interpreterFallback = (s_InterpreterFallback.val(CLRConfig::INTERNAL_InterpreterFallback) != 0);
if (interpreterFallback == false)
@@ -11827,7 +11755,10 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
// (We assume that importation is completely architecture-independent, or at least nearly so.)
if (FAILED(ret) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE))
{
- ret = Interpreter::GenerateInterpreterStub(comp, info, nativeEntry, nativeSizeOfCode);
+ if (SUCCEEDED(ret = Interpreter::GenerateInterpreterStub(comp, info, nativeEntry, nativeSizeOfCode)))
+ {
+ isInterpreterStub = true;
+ }
}
}
@@ -11847,7 +11778,10 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
// (We assume that importation is completely architecture-independent, or at least nearly so.)
if (FAILED(ret) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE))
{
- ret = Interpreter::GenerateInterpreterStub(comp, info, nativeEntry, nativeSizeOfCode);
+ if (SUCCEEDED(ret = Interpreter::GenerateInterpreterStub(comp, info, nativeEntry, nativeSizeOfCode)))
+ {
+ isInterpreterStub = true;
+ }
}
}
#else
@@ -11882,7 +11816,13 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
#if defined(FEATURE_GDBJIT)
- if (SUCCEEDED(ret) && *nativeEntry != NULL)
+ bool isJittedEntry = SUCCEEDED(ret) && *nativeEntry != NULL;
+
+#ifdef FEATURE_INTERPRETER
+ isJittedEntry &= !isInterpreterStub;
+#endif // FEATURE_INTERPRETER
+
+ if (isJittedEntry)
{
CodeHeader* pCH = ((CodeHeader*)((PCODE)*nativeEntry & ~1)) - 1;
pCH->SetCalledMethods((PTR_VOID)comp->GetCalledMethods());
@@ -11926,13 +11866,6 @@ CorJitResult invokeCompileMethod(EEJitManager *jitMgr,
return ret;
}
-CORJIT_FLAGS GetCompileFlagsIfGenericInstantiation(
- CORINFO_METHOD_HANDLE method,
- CORJIT_FLAGS compileFlags,
- ICorJitInfo * pCorJitInfo,
- BOOL * raiseVerificationException,
- BOOL * unverifiableGenericCode);
-
CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
CEEInfo *comp,
struct CORINFO_METHOD_INFO *info,
@@ -12182,22 +12115,10 @@ CORJIT_FLAGS GetCompileFlags(MethodDesc * ftn, CORJIT_FLAGS flags, CORINFO_METHO
}
}
- //
- // Verification flags
- //
-
-#ifdef _DEBUG
- if (g_pConfig->IsJitVerificationDisabled())
- flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
-#endif // _DEBUG
-
- if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && Security::CanSkipVerification(ftn))
- flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
if (ftn->IsILStub())
{
- flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
-
// no debug info available for IL stubs
flags.Clear(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_INFO);
}
@@ -12205,148 +12126,6 @@ CORJIT_FLAGS GetCompileFlags(MethodDesc * ftn, CORJIT_FLAGS flags, CORINFO_METHO
return flags;
}
-#if defined(_WIN64)
-//The implementation of Jit64 prevents it from both inlining and verifying at the same time. This causes a
-//perf problem for code that adopts Transparency. This code attempts to enable inlining in spite of that
-//limitation in that scenario.
-//
-//This only works for real methods. If the method isn't IsIL, then IsVerifiable will AV. That would be a
-//bad thing (TM).
-BOOL IsTransparentMethodSafeToSkipVerification(CORJIT_FLAGS flags, MethodDesc * ftn)
-{
- STANDARD_VM_CONTRACT;
-
- BOOL ret = FALSE;
- if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && !flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION)
- && Security::IsMethodTransparent(ftn) &&
- ((ftn->IsIL() && !ftn->IsUnboxingStub()) ||
- (ftn->IsDynamicMethod() && !ftn->IsILStub())))
- {
- EX_TRY
- {
- //Verify the method
- ret = ftn->IsVerifiable();
- }
- EX_CATCH
- {
- //If the jit throws an exception, do not let it leak out of here. For example, we can sometimes
- //get an IPE that we could recover from in the Jit (i.e. invalid local in a method with skip
- //verification).
- }
- EX_END_CATCH(RethrowTerminalExceptions)
- }
- return ret;
-}
-#else
-#define IsTransparentMethodSafeToSkipVerification(flags,ftn) (FALSE)
-#endif //_WIN64
-
-/*********************************************************************/
-// We verify generic code once and for all using the typical open type,
-// and then no instantiations need to be verified. If verification
-// failed, then we need to throw an exception whenever we try
-// to compile a real instantiation
-
-CORJIT_FLAGS GetCompileFlagsIfGenericInstantiation(
- CORINFO_METHOD_HANDLE method,
- CORJIT_FLAGS compileFlags,
- ICorJitInfo * pCorJitInfo,
- BOOL * raiseVerificationException,
- BOOL * unverifiableGenericCode)
-{
- STANDARD_VM_CONTRACT;
-
- *raiseVerificationException = FALSE;
- *unverifiableGenericCode = FALSE;
-
- // If we have already decided to skip verification, keep on going.
- if (compileFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION))
- return compileFlags;
-
- CorInfoInstantiationVerification ver = pCorJitInfo->isInstantiationOfVerifiedGeneric(method);
-
- switch(ver)
- {
- case INSTVER_NOT_INSTANTIATION:
- // Non-generic, or open instantiation of a generic type/method
- if (IsTransparentMethodSafeToSkipVerification(compileFlags, (MethodDesc*)method))
- compileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
- return compileFlags;
-
- case INSTVER_GENERIC_PASSED_VERIFICATION:
- // If the typical instantiation is verifiable, there is no need
- // to verify the concrete instantiations
- compileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
- return compileFlags;
-
- case INSTVER_GENERIC_FAILED_VERIFICATION:
-
- *unverifiableGenericCode = TRUE;
-
- // The generic method is not verifiable.
- // Check if it has SkipVerification permission
- MethodDesc * pGenMethod = GetMethod(method)->LoadTypicalMethodDefinition();
-
- CORINFO_METHOD_HANDLE genMethodHandle = CORINFO_METHOD_HANDLE(pGenMethod);
-
- CorInfoCanSkipVerificationResult canSkipVer;
- canSkipVer = pCorJitInfo->canSkipMethodVerification(genMethodHandle);
-
- switch(canSkipVer)
- {
-
-#ifdef FEATURE_PREJIT
- case CORINFO_VERIFICATION_DONT_JIT:
- {
- // Transparent code could be partial trust, but we don't know at NGEN time.
- // This is the flag that NGEN passes to the JIT to tell it to give-up if it
- // hits unverifiable code. Since we've already hit unverifiable code,
- // there's no point in starting the JIT, just to have it give up, so we
- // give up here.
- _ASSERTE(compileFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_PREJIT));
- *raiseVerificationException = TRUE;
- return CORJIT_FLAGS(); // This value will not be used
- }
-#else // FEATURE_PREJIT
- // Need to have this case here to keep the MAC build happy
- case CORINFO_VERIFICATION_DONT_JIT:
- {
- _ASSERTE(!"We should never get here");
- return compileFlags;
- }
-#endif // FEATURE_PREJIT
-
- case CORINFO_VERIFICATION_CANNOT_SKIP:
- {
- // For unverifiable generic code without SkipVerification permission,
- // we cannot ask the compiler to emit CORINFO_HELP_VERIFICATION in
- // unverifiable branches as the compiler cannot determine the unverifiable
- // branches while compiling the concrete instantiation. Instead,
- // just throw a VerificationException right away.
- *raiseVerificationException = TRUE;
- return CORJIT_FLAGS(); // This value will not be used
- }
-
- case CORINFO_VERIFICATION_CAN_SKIP:
- {
- compileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
- return compileFlags;
- }
-
- case CORINFO_VERIFICATION_RUNTIME_CHECK:
- {
- // Compile the method without CORJIT_FLAG_SKIP_VERIFICATION.
- // The compiler will know to add a call to
- // CORINFO_HELP_VERIFICATION_RUNTIME_CHECK, and then to skip verification.
- return compileFlags;
- }
- }
- }
-
- _ASSERTE(!"We should never get here");
- return compileFlags;
-}
-
// ********************************************************************
// Throw the right type of exception for the given JIT result
@@ -12554,7 +12333,8 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
for (;;)
{
#ifndef CROSSGEN_COMPILE
- CEEJitInfo jitInfo(ftn, ILHeader, jitMgr, flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY));
+ CEEJitInfo jitInfo(ftn, ILHeader, jitMgr, flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY),
+ !flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING));
#else
// This path should be only ever used for verification in crossgen and so we should not need EEJitManager
_ASSERTE(flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY));
@@ -12604,26 +12384,12 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
pMethodForSecurity->GetAttrs(),
pMethodForSecurity,
NULL,
- accessCheckOptions,
- TRUE /*Check method transparency*/,
- TRUE /*Check type transparency*/))
+ accessCheckOptions))
{
EX_THROW(EEMethodException, (pMethodForSecurity));
}
}
- BOOL raiseVerificationException, unverifiableGenericCode;
-
- flags = GetCompileFlagsIfGenericInstantiation(
- ftnHnd,
- flags,
- &jitInfo,
- &raiseVerificationException,
- &unverifiableGenericCode);
-
- if (raiseVerificationException)
- COMPlusThrow(kVerificationException);
-
CorJitResult res;
PBYTE nativeEntry;
ULONG sizeOfCode;
@@ -12720,11 +12486,6 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
if (flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY))
{
- // The method must been processed by the verifier. Note that it may
- // either have been marked as verifiable or unverifiable.
- // ie. IsVerified() does not imply IsVerifiable()
- _ASSERTE(ftn->IsVerified());
-
// We are done
break;
}
@@ -12829,6 +12590,10 @@ void Module::LoadHelperTable()
BYTE * curEntry = table;
BYTE * tableEnd = table + tableSize;
+#ifdef FEATURE_PERFMAP
+ PerfMap::LogStubs(__FUNCTION__, GetSimpleName(), (PCODE)table, tableSize);
+#endif
+
#ifdef LOGGING
int iEntryNumber = 0;
#endif // LOGGING
diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
index 1dccdb24e9..93470ecbac 100644
--- a/src/vm/jitinterface.h
+++ b/src/vm/jitinterface.h
@@ -649,6 +649,7 @@ public:
// ICorMethodInfo stuff
const char* getMethodName (CORINFO_METHOD_HANDLE ftnHnd, const char** scopeName);
+ const char* getMethodNameFromMetadata (CORINFO_METHOD_HANDLE ftnHnd, const char** className, const char** namespaceName);
unsigned getMethodHash (CORINFO_METHOD_HANDLE ftnHnd);
DWORD getMethodAttribs (CORINFO_METHOD_HANDLE ftnHnd);
@@ -728,8 +729,8 @@ public:
void getMethodVTableOffset (
CORINFO_METHOD_HANDLE methodHnd,
unsigned * pOffsetOfIndirection,
- unsigned * pOffsetAfterIndirection
- );
+ unsigned * pOffsetAfterIndirection,
+ bool * isRelative);
CORINFO_METHOD_HANDLE resolveVirtualMethod(
CORINFO_METHOD_HANDLE virtualMethod,
@@ -1053,16 +1054,17 @@ public:
DWORD getExpectedTargetArchitecture();
- CEEInfo(MethodDesc * fd = NULL, bool fVerifyOnly = false) :
+ CEEInfo(MethodDesc * fd = NULL, bool fVerifyOnly = false, bool fAllowInlining = true) :
m_pOverride(NULL),
m_pMethodBeingCompiled(fd),
m_fVerifyOnly(fVerifyOnly),
m_pThread(GetThread()),
m_hMethodForSecurity_Key(NULL),
- m_pMethodForSecurity_Value(NULL)
+ m_pMethodForSecurity_Value(NULL),
#if defined(FEATURE_GDBJIT)
- , m_pCalledMethods(NULL)
+ m_pCalledMethods(NULL),
#endif
+ m_allowInlining(fAllowInlining)
{
LIMITED_METHOD_CONTRACT;
}
@@ -1155,6 +1157,8 @@ protected:
CalledMethod * m_pCalledMethods;
#endif
+ bool m_allowInlining;
+
// Tracking of module activation dependencies. We have two flavors:
// - Fast one that gathers generic arguments from EE handles, but does not work inside generic context.
// - Slow one that operates on typespec and methodspecs from metadata.
@@ -1331,8 +1335,8 @@ public:
#endif
CEEJitInfo(MethodDesc* fd, COR_ILMETHOD_DECODER* header,
- EEJitManager* jm, bool fVerifyOnly)
- : CEEInfo(fd, fVerifyOnly),
+ EEJitManager* jm, bool fVerifyOnly, bool allowInlining = true)
+ : CEEInfo(fd, fVerifyOnly, allowInlining),
m_jitManager(jm),
m_CodeHeader(NULL),
m_ILHeader(header),
@@ -1465,7 +1469,6 @@ protected :
void* m_pvGphProfilerHandle;
} m_gphCache;
-
};
#endif // CROSSGEN_COMPILE
diff --git a/src/vm/listlock.cpp b/src/vm/listlock.cpp
deleted file mode 100644
index 450e85aef5..0000000000
--- a/src/vm/listlock.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-// ===========================================================================
-// File: ListLock.cpp
-//
-
-//
-// ===========================================================================
-// This file decribes the list lock and deadlock aware list lock.
-// ===========================================================================
-
-
-#include "common.h"
-#include "listlock.h"
-#include "listlock.inl"
-
-ListLockEntry::ListLockEntry(ListLock *pList, void *pData, const char *description)
- : m_deadlock(description),
- m_pList(pList),
- m_pData(pData),
- m_Crst(CrstListLock,
- (CrstFlags)(CRST_REENTRANCY | (pList->IsHostBreakable()?CRST_HOST_BREAKABLE:0))),
- m_pszDescription(description),
- m_pNext(NULL),
- m_dwRefCount(1),
- m_hrResultCode(S_FALSE),
- m_hInitException(NULL),
- m_pLoaderAllocator(NULL)
-#ifdef FEATURE_CORRUPTING_EXCEPTIONS
- ,
- m_CorruptionSeverity(NotCorrupting)
-#endif // FEATURE_CORRUPTING_EXCEPTIONS
-{
- WRAPPER_NO_CONTRACT;
-}
-
-ListLockEntry *ListLockEntry::Find(ListLock* pLock, LPVOID pPointer, const char *description)
-{
- CONTRACTL
- {
- THROWS;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- _ASSERTE(pLock->HasLock());
-
- ListLockEntry *pEntry = pLock->Find(pPointer);
- if (pEntry==NULL)
- {
- pEntry = new ListLockEntry(pLock, pPointer, description);
- pLock->AddElement(pEntry);
- }
- else
- pEntry->AddRef();
-
- return pEntry;
-};
-
-void ListLockEntry::AddRef()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- PRECONDITION(CheckPointer(this));
- }
- CONTRACTL_END;
-
- FastInterlockIncrement((LONG*)&m_dwRefCount);
-}
-
-void ListLockEntry::Release()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_TRIGGERS;
- MODE_ANY;
- PRECONDITION(CheckPointer(this));
- }
- CONTRACTL_END;
-
- ListLockHolder lock(m_pList);
-
- if (FastInterlockDecrement((LONG*)&m_dwRefCount) == 0)
- {
- // Remove from list
- m_pList->Unlink(this);
- delete this;
- }
-};
-
diff --git a/src/vm/listlock.h b/src/vm/listlock.h
index e16741a7d7..db953c8b55 100644
--- a/src/vm/listlock.h
+++ b/src/vm/listlock.h
@@ -17,7 +17,8 @@
#include "threads.h"
#include "crst.h"
-class ListLock;
+template < typename ELEMENT >
+class ListLockBase;
// This structure is used for running class init methods or JITing methods
// (m_pData points to a FunctionDesc). This class cannot have a destructor since it is used
 // in functions that also have EX_TRY's, and the VC compiler doesn't allow classes with destructors
@@ -25,9 +26,14 @@ class ListLock;
// <TODO>@FUTURE Keep a pool of these (e.g. an array), so we don't have to allocate on the fly</TODO>
// m_hInitException contains a handle to the exception thrown by the class init. This
// allows us to throw this information to the caller on subsequent class init attempts.
-class ListLockEntry
+template < typename ELEMENT >
+class ListLockEntryBase
{
- friend class ListLock;
+ friend class ListLockBase<ELEMENT>;
+ typedef ListLockEntryBase<ELEMENT> Entry_t;
+ typedef ListLockBase<ELEMENT> List_t;
+ typedef typename List_t::LockHolder ListLockHolder;
+
public:
#ifdef _DEBUG
@@ -40,11 +46,11 @@ public:
#endif // DEBUG
DeadlockAwareLock m_deadlock;
- ListLock * m_pList;
- void * m_pData;
+ List_t * m_pList;
+ ELEMENT m_data;
Crst m_Crst;
const char * m_pszDescription;
- ListLockEntry * m_pNext;
+ Entry_t * m_pNext;
DWORD m_dwRefCount;
HRESULT m_hrResultCode;
LOADERHANDLE m_hInitException;
@@ -54,9 +60,27 @@ public:
CorruptionSeverity m_CorruptionSeverity;
#endif // FEATURE_CORRUPTING_EXCEPTIONS
- ListLockEntry(ListLock *pList, void *pData, const char *description = NULL);
+ ListLockEntryBase(List_t *pList, ELEMENT data, const char *description = NULL)
+ : m_deadlock(description),
+ m_pList(pList),
+ m_data(data),
+ m_Crst(CrstListLock,
+ (CrstFlags)(CRST_REENTRANCY | (pList->IsHostBreakable() ? CRST_HOST_BREAKABLE : 0))),
+ m_pszDescription(description),
+ m_pNext(NULL),
+ m_dwRefCount(1),
+ m_hrResultCode(S_FALSE),
+ m_hInitException(NULL),
+ m_pLoaderAllocator(dac_cast<PTR_LoaderAllocator>(nullptr))
+#ifdef FEATURE_CORRUPTING_EXCEPTIONS
+ ,
+ m_CorruptionSeverity(NotCorrupting)
+#endif // FEATURE_CORRUPTING_EXCEPTIONS
+ {
+ WRAPPER_NO_CONTRACT;
+ }
- virtual ~ListLockEntry()
+ virtual ~ListLockEntryBase()
{
}
@@ -102,10 +126,65 @@ public:
m_Crst.Leave();
}
- static ListLockEntry *Find(ListLock* pLock, LPVOID pPointer, const char *description = NULL) DAC_EMPTY_RET(NULL);
+ static Entry_t *Find(List_t* pLock, ELEMENT data, const char *description = NULL)
+ {
+ CONTRACTL
+ {
+ THROWS;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ }
+ CONTRACTL_END;
+
+ _ASSERTE(pLock->HasLock());
+
+ Entry_t *pEntry = pLock->Find(data);
+ if (pEntry == NULL)
+ {
+ pEntry = new Entry_t(pLock, data, description);
+ pLock->AddElement(pEntry);
+ }
+ else
+ pEntry->AddRef();
+
+ return pEntry;
+ };
+
+
+ void AddRef()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
+
+ FastInterlockIncrement((LONG*)&m_dwRefCount);
+ }
+
+ void Release()
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_ANY;
+ PRECONDITION(CheckPointer(this));
+ }
+ CONTRACTL_END;
- void AddRef() DAC_EMPTY_ERR();
- void Release() DAC_EMPTY_ERR();
+ ListLockHolder lock(m_pList);
+
+ if (FastInterlockDecrement((LONG*)&m_dwRefCount) == 0)
+ {
+ // Remove from list
+ m_pList->Unlink(this);
+ delete this;
+ }
+ };
#ifdef _DEBUG
BOOL HasLock()
@@ -117,14 +196,14 @@ public:
// LockHolder holds the lock of the element, not the element itself
- DEBUG_NOINLINE static void LockHolderEnter(ListLockEntry *pThis) PUB
+ DEBUG_NOINLINE static void LockHolderEnter(Entry_t *pThis) PUB
{
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
pThis->Enter();
}
- DEBUG_NOINLINE static void LockHolderLeave(ListLockEntry *pThis) PUB
+ DEBUG_NOINLINE static void LockHolderLeave(Entry_t *pThis) PUB
{
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
@@ -139,7 +218,7 @@ public:
m_deadlock.EndEnterLock();
}
- typedef Wrapper<ListLockEntry *, ListLockEntry::LockHolderEnter, ListLockEntry::LockHolderLeave> LockHolderBase;
+ typedef Wrapper<Entry_t *, LockHolderEnter, LockHolderLeave> LockHolderBase;
class LockHolder : public LockHolderBase
{
@@ -150,32 +229,36 @@ public:
{
}
- LockHolder(ListLockEntry *value, BOOL take = TRUE)
+ LockHolder(Entry_t *value, BOOL take = TRUE)
: LockHolderBase(value, take)
{
}
BOOL DeadlockAwareAcquire()
{
- if (!m_acquired && m_value != NULL)
+ if (!this->m_acquired && this->m_value != NULL)
{
- if (!m_value->m_deadlock.TryBeginEnterLock())
+ if (!this->m_value->m_deadlock.TryBeginEnterLock())
return FALSE;
- m_value->FinishDeadlockAwareEnter();
- m_acquired = TRUE;
+ this->m_value->FinishDeadlockAwareEnter();
+ this->m_acquired = TRUE;
}
return TRUE;
}
};
};
-class ListLock
+template < typename ELEMENT >
+class ListLockBase
{
+ typedef ListLockBase<ELEMENT> List_t;
+ typedef ListLockEntryBase<ELEMENT> Entry_t;
+
protected:
CrstStatic m_Crst;
BOOL m_fInited;
BOOL m_fHostBreakable; // Lock can be broken by a host for deadlock detection
- ListLockEntry * m_pHead;
+ Entry_t * m_pHead;
public:
@@ -219,7 +302,7 @@ class ListLock
return m_fHostBreakable;
}
- void AddElement(ListLockEntry* pElement)
+ void AddElement(Entry_t* pElement)
{
WRAPPER_NO_CONTRACT;
pElement->m_pNext = m_pHead;
@@ -257,10 +340,39 @@ class ListLock
// Must own the lock before calling this or is ok if the debugger has
// all threads stopped
- ListLockEntry *Find(void *pData);
+ inline Entry_t *Find(ELEMENT data)
+ {
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+#ifdef DEBUGGING_SUPPORTED
+ PRECONDITION(m_Crst.OwnedByCurrentThread() ||
+ CORDebuggerAttached()
+ // This condition should be true, but it is awkward to assert it because adding dbginterface.h creates lots of cycles in the includes.
+ // It didn't seem valuable enough to refactor out a wrapper just to preserve it.
+ /* && g_pDebugInterface->IsStopped() */);
+#else
+ PRECONDITION(m_Crst.OwnedByCurrentThread());
+#endif // DEBUGGING_SUPPORTED
+
+ }
+ CONTRACTL_END;
+
+ Entry_t *pSearch;
+
+ for (pSearch = m_pHead; pSearch != NULL; pSearch = pSearch->m_pNext)
+ {
+ if (pSearch->m_data == data)
+ return pSearch;
+ }
+
+ return NULL;
+ }
// Must own the lock before calling this!
- ListLockEntry* Pop(BOOL unloading = FALSE)
+ Entry_t* Pop(BOOL unloading = FALSE)
{
LIMITED_METHOD_CONTRACT;
#ifdef _DEBUG
@@ -269,13 +381,13 @@ class ListLock
#endif
if(m_pHead == NULL) return NULL;
- ListLockEntry* pEntry = m_pHead;
+ Entry_t* pEntry = m_pHead;
m_pHead = m_pHead->m_pNext;
return pEntry;
}
// Must own the lock before calling this!
- ListLockEntry* Peek()
+ Entry_t* Peek()
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_Crst.OwnedByCurrentThread());
@@ -283,12 +395,12 @@ class ListLock
}
// Must own the lock before calling this!
- BOOL Unlink(ListLockEntry *pItem)
+ BOOL Unlink(Entry_t *pItem)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(m_Crst.OwnedByCurrentThread());
- ListLockEntry *pSearch;
- ListLockEntry *pPrev;
+ Entry_t *pSearch;
+ Entry_t *pPrev;
pPrev = NULL;
@@ -320,21 +432,21 @@ class ListLock
}
#endif
- DEBUG_NOINLINE static void HolderEnter(ListLock *pThis)
+ DEBUG_NOINLINE static void HolderEnter(List_t *pThis)
{
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
pThis->Enter();
}
- DEBUG_NOINLINE static void HolderLeave(ListLock *pThis)
+ DEBUG_NOINLINE static void HolderLeave(List_t *pThis)
{
WRAPPER_NO_CONTRACT;
ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT;
pThis->Leave();
}
- typedef Wrapper<ListLock*, ListLock::HolderEnter, ListLock::HolderLeave> LockHolder;
+ typedef Wrapper<List_t*, List_t::HolderEnter, List_t::HolderLeave> LockHolder;
};
class WaitingThreadListElement
@@ -344,6 +456,9 @@ public:
WaitingThreadListElement * m_pNext;
};
+typedef class ListLockBase<void*> ListLock;
+typedef class ListLockEntryBase<void*> ListLockEntry;
+
// Holds the lock of the ListLock
typedef ListLock::LockHolder ListLockHolder;
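
The templating lets a lock list be keyed on any copyable element type with operator==, while the void* typedefs above keep existing callers source-compatible. A sketch of a typed instantiation; the element type and entry description here are hypothetical:

    // Sketch only: ELEMENT is hypothetical; any type with operator== works.
    typedef ListLockBase<MethodDesc *> JitListLock;
    typedef ListLockEntryBase<MethodDesc *> JitListLockEntry;

    void DoGuardedWorkOnce(JitListLock *pList, MethodDesc *pMD)
    {
        JitListLockEntry *pEntry;
        {
            // Find() asserts the list lock is held; it walks m_pHead comparing
            // m_data == pMD and allocates a new refcounted entry on a miss.
            JitListLock::LockHolder lock(pList);
            pEntry = JitListLockEntry::Find(pList, pMD, "jit entry");
        }
        {
            JitListLockEntry::LockHolder entryLock(pEntry);
            // ... run-once work for pMD, guarded by the per-entry Crst ...
        }
        pEntry->Release();   // retakes the list lock and unlinks at refcount zero
    }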
diff --git a/src/vm/listlock.inl b/src/vm/listlock.inl
deleted file mode 100644
index 17e383edd7..0000000000
--- a/src/vm/listlock.inl
+++ /dev/null
@@ -1,51 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-// ===========================================================================
-//
-
-//
-// File: ListLock.inl
-//
-// ===========================================================================
-// This file decribes the list lock and deadlock aware list lock functions
-// that are inlined but can't go in the header.
-// ===========================================================================
-#ifndef LISTLOCK_INL
-#define LISTLOCK_INL
-
-#include "listlock.h"
-#include "dbginterface.h"
-// Must own the lock before calling this or is ok if the debugger has
-// all threads stopped
-
-inline ListLockEntry *ListLock::Find(void *pData)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- PRECONDITION(CheckPointer(this));
-#ifdef DEBUGGING_SUPPORTED
- PRECONDITION(m_Crst.OwnedByCurrentThread() ||
- CORDebuggerAttached() && g_pDebugInterface->IsStopped());
-#else
- PRECONDITION(m_Crst.OwnedByCurrentThread());
-#endif // DEBUGGING_SUPPORTED
-
- }
- CONTRACTL_END;
-
- ListLockEntry *pSearch;
-
- for (pSearch = m_pHead; pSearch != NULL; pSearch = pSearch->m_pNext)
- {
- if (pSearch->m_pData == pData)
- return pSearch;
- }
-
- return NULL;
-}
-
-
-#endif // LISTLOCK_I
diff --git a/src/vm/loaderallocator.cpp b/src/vm/loaderallocator.cpp
index 1a05bf2c05..ff54277efd 100644
--- a/src/vm/loaderallocator.cpp
+++ b/src/vm/loaderallocator.cpp
@@ -1005,7 +1005,9 @@ void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory)
dwExecutableHeapReserveSize,
LOADERHEAP_PROFILE_COUNTER,
NULL,
- TRUE /* Make heap executable */);
+ TRUE /* Make heap executable */,
+ FALSE /* Disable zero-initialization (needed by UMEntryThunkCode::Poison) */
+ );
initReservedMem += dwExecutableHeapReserveSize;
}
diff --git a/src/vm/loaderallocator.hpp b/src/vm/loaderallocator.hpp
index 72fa59857d..b057283136 100644
--- a/src/vm/loaderallocator.hpp
+++ b/src/vm/loaderallocator.hpp
@@ -62,7 +62,9 @@ public:
class StringLiteralMap;
class VirtualCallStubManager;
-class ListLockEntry;
+template <typename ELEMENT>
+class ListLockEntryBase;
+typedef ListLockEntryBase<void*> ListLockEntry;
class LoaderAllocator
{
diff --git a/src/vm/marshalnative.cpp b/src/vm/marshalnative.cpp
index 34d7a861b5..a552ef33f5 100644
--- a/src/vm/marshalnative.cpp
+++ b/src/vm/marshalnative.cpp
@@ -28,7 +28,6 @@
#include "fieldmarshaler.h"
#include "cgensys.h"
#include "gcheaputilities.h"
-#include "security.h"
#include "dbginterface.h"
#include "marshalnative.h"
#include "fcall.h"
diff --git a/src/vm/memberload.cpp b/src/vm/memberload.cpp
index aa5667dd21..86be548cd2 100644
--- a/src/vm/memberload.cpp
+++ b/src/vm/memberload.cpp
@@ -30,7 +30,6 @@
#include "fieldmarshaler.h"
#include "cgensys.h"
#include "gcheaputilities.h"
-#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
#include "sigformat.h"
@@ -45,7 +44,6 @@
#include "virtualcallstub.h"
#include "eeconfig.h"
#include "contractimpl.h"
-#include "listlock.inl"
#include "generics.h"
#include "instmethhash.h"
#include "typestring.h"
diff --git a/src/vm/metasig.h b/src/vm/metasig.h
index c2dc42fb9d..bbd326ebe3 100644
--- a/src/vm/metasig.h
+++ b/src/vm/metasig.h
@@ -56,6 +56,7 @@
// T -- TypedReference -- TypedReference
// G -- -- Generic type variable
// M -- -- Generic method variable
+// GI -- -- Generic type instantiation
//
//#DEFINE_METASIG
@@ -128,6 +129,8 @@
#define G(n) METASIG_ATOM(ELEMENT_TYPE_VAR) METASIG_ATOM(n)
#define M(n) METASIG_ATOM(ELEMENT_TYPE_MVAR) METASIG_ATOM(n)
+#define GI(type, n, x) METASIG_ATOM(ELEMENT_TYPE_GENERICINST) type METASIG_ATOM(n) x
+
// The references to other types have special definition in some cases
#ifndef C
#define C(x) METASIG_ATOM(ELEMENT_TYPE_CLASS) METASIG_ATOM(CLASS__ ## x % 0x100) METASIG_ATOM(CLASS__ ## x / 0x100)
@@ -145,6 +148,8 @@
#define G(n) METASIG_ATOM(ELEMENT_TYPE_VAR)
#define M(n) METASIG_ATOM(ELEMENT_TYPE_MVAR)
+#define GI(type, n, x) METASIG_ATOM(ELEMENT_TYPE_GENERICINST)
+
// The references to other types have special definition in some cases
#ifndef C
#define C(x) METASIG_ATOM(ELEMENT_TYPE_CLASS)
@@ -285,6 +290,11 @@ DEFINE_METASIG(GM(RefByte_T_RetVoid, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, r(b) M(0)
DEFINE_METASIG(GM(PtrVoid_RetT, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, P(v), M(0)))
DEFINE_METASIG(GM(PtrVoid_T_RetVoid, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, P(v) M(0), v))
+DEFINE_METASIG(GM(RefTFrom_RetRefTTo, IMAGE_CEE_CS_CALLCONV_DEFAULT, 2, r(M(0)), r(M(1))))
+DEFINE_METASIG(GM(Obj_RetT, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, j, M(0)))
+DEFINE_METASIG(GM(RefT_Int_RetRefT, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, r(M(0)) i, r(M(0))))
+DEFINE_METASIG(GM(PtrVoid_Int_RetPtrVoid, IMAGE_CEE_CS_CALLCONV_DEFAULT, 1, P(v) i, P(v)))
+
DEFINE_METASIG_T(SM(SafeHandle_RefBool_RetIntPtr, C(SAFE_HANDLE) r(F), I ))
DEFINE_METASIG_T(SM(SafeHandle_RetVoid, C(SAFE_HANDLE), v ))
@@ -382,6 +392,7 @@ DEFINE_METASIG(IM(Bool_Bool_RetStr, F F, s))
DEFINE_METASIG(IM(PtrChar_RetVoid, P(u), v))
DEFINE_METASIG(IM(PtrChar_Int_Int_RetVoid, P(u) i i, v))
+DEFINE_METASIG_T(IM(ReadOnlySpanOfChar_RetVoid, GI(g(READONLY_SPAN), 1, u), v))
DEFINE_METASIG(IM(PtrSByt_RetVoid, P(B), v))
DEFINE_METASIG(IM(PtrSByt_Int_Int_RetVoid, P(B) i i, v))
DEFINE_METASIG_T(IM(PtrSByt_Int_Int_Encoding_RetVoid, P(B) i i C(ENCODING), v))
@@ -393,6 +404,7 @@ DEFINE_METASIG(IM(ArrChar_Int_Int_RetStr, a(u) i i, s))
DEFINE_METASIG(IM(Char_Int_RetStr, u i, s))
DEFINE_METASIG(IM(PtrChar_RetStr, P(u), s))
DEFINE_METASIG(IM(PtrChar_Int_Int_RetStr, P(u) i i, s))
+DEFINE_METASIG_T(IM(ReadOnlySpanOfChar_RetStr, GI(g(READONLY_SPAN), 1, u), s))
DEFINE_METASIG(IM(Obj_Int_RetIntPtr, j i, I))
DEFINE_METASIG(IM(Char_Char_RetStr, u u, s))
@@ -405,6 +417,8 @@ DEFINE_METASIG(IM(Int_RefIntPtr_RefIntPtr_RefIntPtr_RetVoid, i r(I) r(I) r(I), v
DEFINE_METASIG(IM(Int_RetStr, i, s))
DEFINE_METASIG(IM(Int_RetVoid, i, v))
DEFINE_METASIG(IM(Int_RetBool, i, F))
+DEFINE_METASIG(IM(Int_Int_RetVoid, i i, v))
+DEFINE_METASIG(IM(Int_Int_Int_RetVoid, i i i, v))
DEFINE_METASIG(IM(Int_Int_Int_Int_RetVoid, i i i i, v))
DEFINE_METASIG_T(IM(Obj_EventArgs_RetVoid, j C(EVENT_ARGS), v))
DEFINE_METASIG_T(IM(Obj_UnhandledExceptionEventArgs_RetVoid, j C(UNHANDLED_EVENTARGS), v))
@@ -608,6 +622,7 @@ DEFINE_METASIG_T(IM(IAsyncResult_RetVoid, C(IASYNCRESULT), v))
#undef T
#undef G
#undef M
+#undef GI
#undef _
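
For reference, a sketch of the atoms the new GI element contributes in the "full" branch of the macro above, using the ReadOnlySpan<char> parameter of ReadOnlySpanOfChar_RetStr; the exact two-atom class-id encoding is assumed to mirror the existing C(x)/g(x) definitions:

    // Sketch only: what GI(g(READONLY_SPAN), 1, u) emits in the full expansion.
    //
    //   ELEMENT_TYPE_GENERICINST         // the GI marker
    //   ELEMENT_TYPE_VALUETYPE           // g(READONLY_SPAN): the open ReadOnlySpan<>
    //   <CLASS__READONLY_SPAN id atoms>  //   (assumed two-atom binder class id)
    //   1                                // arity: one type argument
    //   ELEMENT_TYPE_CHAR                // u: the single argument, char
    //
    // i.e. the hard-coded metasig round-trips as an ordinary GENERICINST
    // signature element, so the binder can match it against real metadata.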
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index 845ce799a9..8778744537 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -12,7 +12,6 @@
#include "common.h"
-#include "security.h"
#include "excep.h"
#include "dbginterface.h"
#include "ecall.h"
@@ -30,9 +29,6 @@
#include "interoputil.h"
#include "prettyprintsig.h"
#include "formattype.h"
-#ifdef FEATURE_INTERPRETER
-#include "interpreter.h"
-#endif
#ifdef FEATURE_PREJIT
#include "compile.h"
@@ -942,118 +938,6 @@ BOOL MethodDesc::IsTightlyBoundToMethodTable()
#ifndef DACCESS_COMPILE
-
-//*******************************************************************************
-HRESULT MethodDesc::Verify(COR_ILMETHOD_DECODER* ILHeader,
- BOOL fThrowException,
- BOOL fForceVerify)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_TRIGGERS;
- INJECT_FAULT(return E_OUTOFMEMORY;);
- }
- CONTRACTL_END
-
-#ifdef _VER_EE_VERIFICATION_ENABLED
- // ForceVerify will force verification if the Verifier is OFF
- if (fForceVerify)
- goto DoVerify;
-
- // Don't even try to verify if verifier is off.
- if (g_fVerifierOff)
- return S_OK;
-
- if (IsVerified())
- return S_OK;
-
- // LazyCanSkipVerification does not resolve the policy.
- // We go ahead with verification if policy is not resolved.
- // In case the verification fails, we resolve policy and
- // fail verification if the Assembly of this method does not have
- // permission to skip verification.
-
- if (Security::LazyCanSkipVerification(GetModule()->GetDomainAssembly()))
- return S_OK;
-
-#ifdef _DEBUG
- _ASSERTE(Security::IsSecurityOn());
- _ASSERTE(GetModule() != SystemDomain::SystemModule());
-#endif // _DEBUG
-
-
-DoVerify:
-
- HRESULT hr;
-
- if (fThrowException)
- hr = Verifier::VerifyMethod(this, ILHeader, NULL,
- fForceVerify ? VER_FORCE_VERIFY : VER_STOP_ON_FIRST_ERROR);
- else
- hr = Verifier::VerifyMethodNoException(this, ILHeader);
-
- if (SUCCEEDED(hr))
- SetIsVerified(TRUE);
-
- return hr;
-#else // !_VER_EE_VERIFICATION_ENABLED
- _ASSERTE(!"EE Verification is disabled, should never get here");
- return E_FAIL;
-#endif // !_VER_EE_VERIFICATION_ENABLED
-}
-
-//*******************************************************************************
-
-BOOL MethodDesc::IsVerifiable()
-{
- STANDARD_VM_CONTRACT;
-
- if (IsVerified())
- return (m_wFlags & mdcVerifiable);
-
- if (!IsTypicalMethodDefinition())
- {
- // We cannot verify concrete instantiation (eg. List<int>.Add()).
- // We have to verify the typical instantiation (eg. List<T>.Add()).
- MethodDesc * pGenMethod = LoadTypicalMethodDefinition();
- BOOL isVerifiable = pGenMethod->IsVerifiable();
-
- // Propagate the result from the typical instantiation to the
- // concrete instantiation
- SetIsVerified(isVerifiable);
-
- return isVerifiable;
- }
-
- COR_ILMETHOD_DECODER *pHeader = NULL;
- // Don't use HasILHeader() here because it returns the wrong answer
- // for methods that have DynamicIL (not to be confused with DynamicMethods)
- if (IsIL() && !IsUnboxingStub())
- {
- COR_ILMETHOD_DECODER::DecoderStatus status;
- COR_ILMETHOD_DECODER header(GetILHeader(), GetMDImport(), &status);
- if (status != COR_ILMETHOD_DECODER::SUCCESS)
- {
- COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
- }
- pHeader = &header;
-
-#ifdef _VER_EE_VERIFICATION_ENABLED
- static ConfigDWORD peVerify;
- if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
- {
- HRESULT hr = Verify(&header, TRUE, FALSE);
- }
-#endif // _VER_EE_VERIFICATION_ENABLED
- }
-
- UnsafeJitFunction(this, pHeader, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY));
- _ASSERTE(IsVerified());
-
- return (IsVerified() && (m_wFlags & mdcVerifiable));
-}
-
//*******************************************************************************
// Update flags in a thread safe manner.
WORD MethodDesc::InterlockedUpdateFlags(WORD wMask, BOOL fSet)
@@ -1176,16 +1060,6 @@ PCODE MethodDesc::GetNativeCode()
return pCode;
}
-#ifdef FEATURE_INTERPRETER
-#ifndef DACCESS_COMPILE // TODO: Need a solution that will work under DACCESS
- PCODE pEntryPoint = GetMethodEntryPoint();
- if (Interpreter::InterpretationStubToMethodInfo(pEntryPoint) == this)
- {
- return pEntryPoint;
- }
-#endif
-#endif
-
if (!HasStableEntryPoint() || HasPrecode())
return NULL;
@@ -2440,32 +2314,6 @@ BOOL MethodDesc::IsPointingToPrestub()
return GetPrecode()->IsPointingToPrestub();
}
-#ifdef FEATURE_INTERPRETER
-//*******************************************************************************
-BOOL MethodDesc::IsReallyPointingToPrestub()
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- SO_TOLERANT;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- if (!HasPrecode())
- {
- PCODE pCode = GetMethodEntryPoint();
- return HasTemporaryEntryPoint() && pCode == GetTemporaryEntryPoint();
- }
-
- if (!IsRestored())
- return TRUE;
-
- return GetPrecode()->IsPointingToPrestub();
-}
-#endif
-
//*******************************************************************************
void MethodDesc::Reset()
{
@@ -2699,9 +2547,6 @@ void MethodDesc::Save(DataImage *image)
{
STANDARD_VM_CONTRACT;
- // Make sure that the transparency is cached in the NGen image
- Security::IsMethodTransparent(this);
-
// Initialize the DoesNotHaveEquivalentValuetypeParameters flag.
// If we fail to determine whether there is a type-equivalent struct parameter (eg. because there is a struct parameter
// defined in a missing dependency), then just continue. The reason we run this method is to initialize a flag that is
@@ -3501,14 +3346,7 @@ MethodDesc::Fixup(
}
}
- if (decltype(InstantiatedMethodDesc::m_pPerInstInfo)::isRelative)
- {
- image->FixupRelativePointerField(this, offsetof(InstantiatedMethodDesc, m_pPerInstInfo));
- }
- else
- {
- image->FixupPointerField(this, offsetof(InstantiatedMethodDesc, m_pPerInstInfo));
- }
+ image->FixupPlainOrRelativePointerField((InstantiatedMethodDesc*) this, &InstantiatedMethodDesc::m_pPerInstInfo);
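
The repeated if (isRelative) branches are being folded into a single FixupPlainOrRelativePointerField call here and below. A minimal sketch of how such a helper could dispatch on the pointer type's compile-time trait; the helper name appears in the patch, but this free-function shape, the member-pointer offset trick, and the exact signature are assumptions for illustration:

    // Sketch only: choose the fixup flavor from the pointer type's isRelative
    // trait so callers stop branching by hand. The real helper is a DataImage
    // member and its signature may differ.
    template <typename DataType, typename PointerFieldType>
    void FixupPlainOrRelativePointerFieldSketch(DataImage* image, DataType* pBase,
                                                PointerFieldType DataType::* pField)
    {
        // offset of the member within the object (offsetof via member pointer)
        SSIZE_T offset = (SSIZE_T)(TADDR)&(((DataType*)0)->*pField);
        if (PointerFieldType::isRelative)
            image->FixupRelativePointerField(pBase, offset); // store self-relative offset
        else
            image->FixupPointerField(pBase, offset);         // store absolute pointer
    }
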
// Generic methods are dealt with specially to avoid encoding the formal method type parameters
if (IsTypicalMethodDefinition())
@@ -3587,14 +3425,7 @@ MethodDesc::Fixup(
NDirectMethodDesc *pNMD = (NDirectMethodDesc *)this;
- if (decltype(NDirectMethodDesc::ndirect.m_pWriteableData)::isRelative)
- {
- image->FixupRelativePointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
- }
- else
- {
- image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
- }
+ image->FixupPlainOrRelativePointerField(pNMD, &NDirectMethodDesc::ndirect, &decltype(NDirectMethodDesc::ndirect)::m_pWriteableData);
NDirectWriteableData *pWriteableData = pNMD->GetWriteableData();
NDirectImportThunkGlue *pImportThunkGlue = pNMD->GetNDirectImportThunkGlue();
@@ -4185,7 +4016,7 @@ void MethodDesc::CheckRestore(ClassLoadLevel level)
// for details on the race.
//
{
- ReJitPublishMethodHolder publishWorker(this, GetNativeCode());
+ PublishMethodHolder publishWorker(this, GetNativeCode());
pIMD->m_wFlags2 = pIMD->m_wFlags2 & ~InstantiatedMethodDesc::Unrestored;
}
@@ -4968,11 +4799,7 @@ Precode* MethodDesc::GetOrCreatePrecode()
}
//*******************************************************************************
-BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/
-#ifdef FEATURE_INTERPRETER
- , BOOL fStable
-#endif
- )
+BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/)
{
CONTRACTL {
THROWS;
@@ -4998,28 +4825,8 @@ BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/
value.SetValueMaybeNull(pSlot, addr | (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
expected.SetValueMaybeNull(pSlot, pExpected | (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
-#ifdef FEATURE_INTERPRETER
- BOOL fRet = FALSE;
-
- fRet = FastInterlockCompareExchangePointer(
- EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
- (TADDR&)value,
- (TADDR&)expected) == (TADDR&)expected;
-
- if (!fRet)
- {
- // Can always replace NULL.
- expected.SetValueMaybeNull(pSlot, (*dac_cast<PTR_TADDR>(pSlot) & FIXUP_LIST_MASK));
- fRet = FastInterlockCompareExchangePointer(
- EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
- (TADDR&)value,
- (TADDR&)expected) == (TADDR&)expected;
- }
- return fRet;
-#else // FEATURE_INTERPRETER
return FastInterlockCompareExchangePointer(EnsureWritablePages(reinterpret_cast<TADDR*>(pSlot)),
(TADDR&)value, (TADDR&)expected) == (TADDR&)expected;
-#endif // FEATURE_INTERPRETER
}
if (IsDefaultInterfaceMethod() && HasPrecode())
@@ -5027,17 +4834,8 @@ BOOL MethodDesc::SetNativeCodeInterlocked(PCODE addr, PCODE pExpected /*=NULL*/
return GetPrecode()->SetTargetInterlocked(addr);
}
-#ifdef FEATURE_INTERPRETER
- PCODE pFound = FastInterlockCompareExchangePointer(GetAddrOfSlot(), addr, pExpected);
- if (fStable)
- {
- InterlockedUpdateFlags2(enum_flag2_HasStableEntryPoint, TRUE);
- }
- return (pFound == pExpected);
-#else
_ASSERTE(pExpected == NULL);
return SetStableEntryPointInterlocked(addr);
-#endif
}
//*******************************************************************************
@@ -5061,26 +4859,6 @@ BOOL MethodDesc::SetStableEntryPointInterlocked(PCODE addr)
return fResult;
}
-#ifdef FEATURE_INTERPRETER
-BOOL MethodDesc::SetEntryPointInterlocked(PCODE addr)
-{
- CONTRACTL {
- NOTHROW;
- GC_NOTRIGGER;
- } CONTRACTL_END;
-
- _ASSERTE(!HasPrecode());
-
- PCODE pExpected = GetTemporaryEntryPoint();
- PTR_PCODE pSlot = GetAddrOfSlot();
-
- BOOL fResult = FastInterlockCompareExchangePointer(pSlot, addr, pExpected) == pExpected;
-
- return fResult;
-}
-
-#endif // FEATURE_INTERPRETER
-
//*******************************************************************************
void NDirectMethodDesc::InterlockedSetNDirectFlags(WORD wFlags)
{
@@ -5245,14 +5023,6 @@ BOOL MethodDesc::HasNativeCallableAttribute()
return FALSE;
}
-//*******************************************************************************
-BOOL MethodDesc::HasSuppressUnmanagedCodeAccessAttr()
-{
- LIMITED_METHOD_CONTRACT;
-
- return TRUE;
-}
-
#ifdef FEATURE_COMINTEROP
//*******************************************************************************
void ComPlusCallMethodDesc::InitComEventCallInfo()
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index 336260cae3..08318ec5b5 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -25,6 +25,7 @@
#include <stddef.h>
#include "eeconfig.h"
#include "precode.h"
+#include "codeversion.h"
#ifndef FEATURE_PREJIT
#include "fixuppointer.h"
@@ -42,6 +43,8 @@ class Dictionary;
class GCCoverageInfo;
class DynamicMethodDesc;
class ReJitManager;
+class CodeVersionManager;
+class PrepareCodeConfig;
typedef DPTR(FCallMethodDesc) PTR_FCallMethodDesc;
typedef DPTR(ArrayMethodDesc) PTR_ArrayMethodDesc;
@@ -143,29 +146,10 @@ enum MethodDescClassification
// Method is static
mdcStatic = 0x0020,
- // Temporary Security Interception.
- // Methods can now be intercepted by security. An intercepted method behaves
- // like it was an interpreted method. The Prestub at the top of the method desc
- // is replaced by an interception stub. Therefore, no back patching will occur.
- // We picked this approach to minimize the number variations given IL and native
- // code with edit and continue. E&C will need to find the real intercepted method
- // and if it is intercepted change the real stub. If E&C is enabled then there
- // is no back patching and needs to fix the pre-stub.
- mdcIntercepted = 0x0040,
-
- // Method requires linktime security checks.
- mdcRequiresLinktimeCheck = 0x0080,
-
- // Method requires inheritance security checks.
- // If this bit is set, then this method demands inheritance permissions
- // or a method that this method overrides demands inheritance permissions
- // or both.
- mdcRequiresInheritanceCheck = 0x0100,
-
- // The method that this method overrides requires an inheritance security check.
- // This bit is used as an optimization to avoid looking up overridden methods
- // during the inheritance check.
- mdcParentRequiresInheritanceCheck = 0x0200,
+ // unused = 0x0040,
+ // unused = 0x0080,
+ // unused = 0x0100,
+ // unused = 0x0200,
// Duplicate method. When a method needs to be placed in multiple slots in the
// method table, because it could not be packed into one slot. For eg, a method
@@ -268,10 +252,6 @@ public:
BOOL SetStableEntryPointInterlocked(PCODE addr);
-#ifdef FEATURE_INTERPRETER
- BOOL SetEntryPointInterlocked(PCODE addr);
-#endif // FEATURE_INTERPRETER
-
BOOL HasTemporaryEntryPoint();
PCODE GetTemporaryEntryPoint();
@@ -507,7 +487,12 @@ public:
BaseDomain *GetDomain();
- ReJitManager * GetReJitManager();
+#ifdef FEATURE_CODE_VERSIONING
+ CodeVersionManager* GetCodeVersionManager();
+#endif
+#ifdef FEATURE_TIERED_COMPILATION
+ CallCounter* GetCallCounter();
+#endif
PTR_LoaderAllocator GetLoaderAllocator();
@@ -669,7 +654,6 @@ public:
}
void ComputeSuppressUnmanagedCodeAccessAttr(IMDInternalImport *pImport);
- BOOL HasSuppressUnmanagedCodeAccessAttr();
BOOL HasNativeCallableAttribute();
#ifdef FEATURE_COMINTEROP
@@ -697,32 +681,6 @@ public:
// Update flags in a thread safe manner.
WORD InterlockedUpdateFlags(WORD wMask, BOOL fSet);
- inline DWORD IsInterceptedForDeclSecurity()
- {
- LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
- return m_wFlags & mdcIntercepted;
- }
-
- inline void SetInterceptedForDeclSecurity()
- {
- LIMITED_METHOD_CONTRACT;
- m_wFlags |= mdcIntercepted;
- }
-
- inline DWORD IsInterceptedForDeclSecurityCASDemandsOnly()
- {
- LIMITED_METHOD_CONTRACT;
- STATIC_CONTRACT_SO_TOLERANT;
- return m_bFlags2 & enum_flag2_CASDemandsOnly;
- }
-
- inline void SetInterceptedForDeclSecurityCASDemandsOnly()
- {
- LIMITED_METHOD_CONTRACT;
- m_bFlags2 |= enum_flag2_CASDemandsOnly;
- }
-
// If the method is in an Edit and Contine (EnC) module, then
// we DON'T want to backpatch this, ever. We MUST always call
// through the precode so that we can update the method.
@@ -746,7 +704,6 @@ public:
InterlockedUpdateFlags(mdcNotInline, set);
}
-
BOOL IsIntrospectionOnly();
#ifndef DACCESS_COMPILE
VOID EnsureActive();
@@ -811,50 +768,11 @@ public:
BOOL IsQCall();
//================================================================
- // Has the method been verified?
- // This does not mean that the IL is verifiable, just that we have
- // determined if the IL is verfiable or unverifiable.
- // (Is this is dead code since the JIT now does verification?)
-
- inline BOOL IsVerified()
- {
- LIMITED_METHOD_CONTRACT;
- return m_wFlags & mdcVerifiedState;
- }
-
- inline void SetIsVerified(BOOL isVerifiable)
- {
- WRAPPER_NO_CONTRACT;
-
- WORD flags = isVerifiable ? (WORD(mdcVerifiedState) | WORD(mdcVerifiable))
- : (WORD(mdcVerifiedState));
- InterlockedUpdateFlags(flags, TRUE);
- }
-
- inline void ResetIsVerified()
- {
- WRAPPER_NO_CONTRACT;
- InterlockedUpdateFlags(mdcVerifiedState | mdcVerifiable, FALSE);
- }
-
- BOOL IsVerifiable();
-
- // fThrowException is used to prevent Verifier from
- // throwin an exception on error
- // fForceVerify is to be used by tools that need to
- // force verifier to verify code even if the code is fully trusted.
- HRESULT Verify(COR_ILMETHOD_DECODER* ILHeader,
- BOOL fThrowException,
- BOOL fForceVerify);
-
-
- //================================================================
//
inline void ClearFlagsOnUpdate()
{
WRAPPER_NO_CONTRACT;
- ResetIsVerified();
SetNotInline(FALSE);
}
@@ -1229,45 +1147,6 @@ protected:
}
public:
- //==================================================================
- // Security...
-
- inline DWORD RequiresLinktimeCheck()
- {
- LIMITED_METHOD_CONTRACT;
- return m_wFlags & mdcRequiresLinktimeCheck;
- }
-
- inline DWORD RequiresInheritanceCheck()
- {
- LIMITED_METHOD_CONTRACT;
- return m_wFlags & mdcRequiresInheritanceCheck;
- }
-
- inline DWORD ParentRequiresInheritanceCheck()
- {
- LIMITED_METHOD_CONTRACT;
- return m_wFlags & mdcParentRequiresInheritanceCheck;
- }
-
- void SetRequiresLinktimeCheck()
- {
- LIMITED_METHOD_CONTRACT;
- m_wFlags |= mdcRequiresLinktimeCheck;
- }
-
- void SetRequiresInheritanceCheck()
- {
- LIMITED_METHOD_CONTRACT;
- m_wFlags |= mdcRequiresInheritanceCheck;
- }
-
- void SetParentRequiresInheritanceCheck()
- {
- LIMITED_METHOD_CONTRACT;
- m_wFlags |= mdcParentRequiresInheritanceCheck;
- }
-
mdMethodDef GetMemberDef() const;
mdMethodDef GetMemberDef_NoLogging() const;
@@ -1286,12 +1165,73 @@ public:
void SetChunkIndex(MethodDescChunk *pChunk);
BOOL IsPointingToPrestub();
-#ifdef FEATURE_INTERPRETER
- BOOL IsReallyPointingToPrestub();
-#endif // FEATURE_INTERPRETER
public:
+ // TRUE iff it is possible to change the code this method will run using
+ // the CodeVersionManager.
+ // Note: EnC currently returns FALSE here because it uses its own separate
+ // scheme to manage versionability. We will likely want to converge them
+ // at some point.
+ BOOL IsVersionable()
+ {
+#ifndef FEATURE_CODE_VERSIONING
+ return FALSE;
+#else
+ return IsVersionableWithPrecode() || IsVersionableWithJumpStamp();
+#endif
+ }
+
+ // If true, these methods version using the CodeVersionManager and
+ // switch between different code versions by updating the target of the precode.
+ // Note: EnC returns FALSE - even though it uses precode updates it does not
+ // use the CodeVersionManager right now
+ BOOL IsVersionableWithPrecode()
+ {
+#ifdef FEATURE_CODE_VERSIONING
+ return
+ // policy: which things do we want to version with a precode if possible
+ IsEligibleForTieredCompilation() &&
+
+ // functional requirements:
+ !IsZapped() && // NGEN directly invokes the pre-generated native code.
+ // without necessarily going through the prestub or
+ // precode
+ HasNativeCodeSlot(); // the stable entry point will need to point at our
+ // precode and not directly contain the native code.
+#else
+ return FALSE;
+#endif
+ }
+
+ // If true, these methods version using the CodeVersionManager and switch between
+ // different code versions by overwriting the first bytes of the method's initial
+ // native code with a jmp instruction.
+ BOOL IsVersionableWithJumpStamp()
+ {
+#if defined(FEATURE_CODE_VERSIONING) && defined(FEATURE_JUMPSTAMP)
+ return
+ // for native image code this is policy, but for jitted code it is a functional requirement
+ // to ensure the prolog is sufficiently large
+ ReJitManager::IsReJITEnabled() &&
+
+ // functional requirement - the runtime doesn't expect both options to be possible
+ !IsVersionableWithPrecode() &&
+
+ // functional requirement - we must be able to evacuate the prolog and the prolog must be big
+ // enough, both of which are only designed to work on jitted code
+ (IsIL() || IsNoMetadata()) &&
+ !IsUnboxingStub() &&
+ !IsInstantiatingStub() &&
+
+ // functional requirement - code version manager can't handle what would happen if the code
+ // was collected
+ !GetLoaderAllocator()->IsCollectible();
+#else
+ return FALSE;
+#endif
+ }
+
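
The two mechanisms above are designed to be mutually exclusive: IsVersionableWithJumpStamp() explicitly excludes anything that versions with a precode, and IsVersionable() is the union of the two. A small illustrative check, not part of the patch (AssertVersioningInvariant is a hypothetical name):

    // Illustrative debug check: a MethodDesc must never claim both versioning
    // mechanisms, and IsVersionable() must be exactly their disjunction.
    void AssertVersioningInvariant(MethodDesc* pMD)
    {
        _ASSERTE(!(pMD->IsVersionableWithPrecode() && pMD->IsVersionableWithJumpStamp()));
        _ASSERTE(pMD->IsVersionable() ==
                 (pMD->IsVersionableWithPrecode() || pMD->IsVersionableWithJumpStamp()));
    }
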
#ifdef FEATURE_TIERED_COMPILATION
// Is this method allowed to be recompiled and the entrypoint redirected so that we
// can optimize its performance? Eligibility is invariant for the lifetime of a method.
@@ -1301,20 +1241,31 @@ public:
// This policy will need to change some more before tiered compilation feature
// can be properly supported across a broad range of scenarios. For instance it
- // wouldn't interact correctly debugging or profiling at the moment because we
- // enable it too aggresively and it conflicts with the operations of those features.
+ // wouldn't interact correctly with debugging at the moment because we enable
+ // it too aggressively and it conflicts with the operation of that feature.
- //Keep in-sync with MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
- //In the future we might want mutable vtable slots too, but that would require
- //more work around the runtime to prevent those mutable pointers from leaking
+ // Keep in-sync with MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
+ // to ensure native slots are available where needed.
return g_pConfig->TieredCompilation() &&
- !GetModule()->HasNativeOrReadyToRunImage() &&
+ !IsZapped() &&
!IsEnCMethod() &&
- HasNativeCodeSlot();
+ HasNativeCodeSlot() &&
+ !IsUnboxingStub() &&
+ !IsInstantiatingStub();
+
+ // We should add an exclusion for modules with debuggable code gen flags
}
#endif
+ // Returns a code version that represents the first (default)
+ // code body that this method would have.
+ NativeCodeVersion GetInitialCodeVersion()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return NativeCodeVersion(dac_cast<PTR_MethodDesc>(this));
+ }
+
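
Because GetInitialCodeVersion() just wraps the MethodDesc, the default version can be obtained without a lock or a CodeVersionManager lookup. Illustrative usage, assuming NativeCodeVersion exposes a GetMethodDesc() accessor matching the constructor used above:

    NativeCodeVersion initial = pMD->GetInitialCodeVersion();
    _ASSERTE(initial.GetMethodDesc() == pMD); // the default version identifies its method
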
// Does this method force the NativeCodeSlot to stay fixed after it
// is first initialized to native code? Consumers of the native code
// pointer need to be very careful about if and when they cache it
@@ -1326,6 +1277,12 @@ public:
BOOL IsNativeCodeStableAfterInit()
{
LIMITED_METHOD_DAC_CONTRACT;
+
+#if defined(FEATURE_JIT_PITCHING)
+ if (IsPitchable())
+ return false;
+#endif
+
return
#ifdef FEATURE_TIERED_COMPILATION
!IsEligibleForTieredCompilation() &&
@@ -1371,11 +1328,7 @@ public:
return GetNativeCode() != NULL;
}
-#ifdef FEATURE_INTERPRETER
- BOOL SetNativeCodeInterlocked(PCODE addr, PCODE pExpected, BOOL fStable);
-#else // FEATURE_INTERPRETER
BOOL SetNativeCodeInterlocked(PCODE addr, PCODE pExpected = NULL);
-#endif // FEATURE_INTERPRETER
TADDR GetAddrOfNativeCodeSlot();
@@ -1442,6 +1395,11 @@ public:
// - ngened code if IsPreImplemented()
PCODE GetNativeCode();
+#if defined(FEATURE_JIT_PITCHING)
+ bool IsPitchable();
+ void PitchNativeCode();
+#endif
+
//================================================================
// FindOrCreateAssociatedMethodDesc
//
@@ -1685,69 +1643,11 @@ public:
PCODE DoPrestub(MethodTable *pDispatchingMT);
- PCODE MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags);
-
VOID GetMethodInfo(SString &namespaceOrClassName, SString &methodName, SString &methodSignature);
VOID GetMethodInfoWithNewSig(SString &namespaceOrClassName, SString &methodName, SString &methodSignature);
VOID GetMethodInfoNoSig(SString &namespaceOrClassName, SString &methodName);
VOID GetFullMethodInfo(SString& fullMethodSigName);
- BOOL IsCritical()
- {
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(HasCriticalTransparentInfo());
- return (m_bFlags2 & enum_flag2_Transparency_Mask) != enum_flag2_Transparency_Transparent;
- }
-
- BOOL IsTreatAsSafe()
- {
- LIMITED_METHOD_CONTRACT;
- _ASSERTE(HasCriticalTransparentInfo());
- return (m_bFlags2 & enum_flag2_Transparency_Mask) == enum_flag2_Transparency_TreatAsSafe;
- }
-
- BOOL IsTransparent()
- {
- WRAPPER_NO_CONTRACT;
- _ASSERTE(HasCriticalTransparentInfo());
- return !IsCritical();
- }
-
- BOOL HasCriticalTransparentInfo()
- {
- LIMITED_METHOD_CONTRACT;
- return (m_bFlags2 & enum_flag2_Transparency_Mask) != enum_flag2_Transparency_Unknown;
- }
-
- void SetCriticalTransparentInfo(BOOL fIsCritical, BOOL fIsTreatAsSafe)
- {
- WRAPPER_NO_CONTRACT;
-
- // TreatAsSafe has to imply critical
- _ASSERTE(fIsCritical || !fIsTreatAsSafe);
-
- EnsureWritablePages(this);
- InterlockedUpdateFlags2(
- static_cast<BYTE>(fIsTreatAsSafe ? enum_flag2_Transparency_TreatAsSafe :
- fIsCritical ? enum_flag2_Transparency_Critical :
- enum_flag2_Transparency_Transparent),
- TRUE);
-
- _ASSERTE(HasCriticalTransparentInfo());
- }
-
- BOOL RequiresLinkTimeCheckHostProtectionOnly()
- {
- LIMITED_METHOD_CONTRACT;
- return (m_bFlags2 & enum_flag2_HostProtectionLinkCheckOnly) != 0;
- }
-
- void SetRequiresLinkTimeCheckHostProtectionOnly()
- {
- LIMITED_METHOD_CONTRACT;
- m_bFlags2 |= enum_flag2_HostProtectionLinkCheckOnly;
- }
-
BOOL HasTypeEquivalentStructParameters()
#ifndef FEATURE_TYPEEQUIVALENCE
{
@@ -1797,21 +1697,11 @@ protected:
enum_flag2_IsUnboxingStub = 0x04,
enum_flag2_HasNativeCodeSlot = 0x08, // Has slot for native code
- enum_flag2_Transparency_Mask = 0x30,
- enum_flag2_Transparency_Unknown = 0x00, // The transparency has not been computed yet
- enum_flag2_Transparency_Transparent = 0x10, // Method is transparent
- enum_flag2_Transparency_Critical = 0x20, // Method is critical
- enum_flag2_Transparency_TreatAsSafe = 0x30, // Method is treat as safe. Also implied critical.
-
- // CAS Demands: Demands for Permissions that are CAS Permissions. CAS Perms are those
- // that derive from CodeAccessPermission and need a stackwalk to evaluate demands
- // Non-CAS perms are those that don't need a stackwalk and don't derive from CodeAccessPermission. The implementor
- // specifies the behavior on a demand. Examples: CAS: FileIOPermission. Non-CAS: PrincipalPermission.
- // This bit gets set if the demands are BCL CAS demands only. Even if there are non-BCL CAS demands, we don't set this
- // bit.
- enum_flag2_CASDemandsOnly = 0x40,
-
- enum_flag2_HostProtectionLinkCheckOnly = 0x80, // Method has LinkTime check due to HP only.
+ enum_flag2_IsJitIntrinsic = 0x10, // Jit may expand method as an intrinsic
+
+ // unused = 0x20,
+ // unused = 0x40,
+ // unused = 0x80,
};
BYTE m_bFlags2;
@@ -1861,6 +1751,18 @@ public:
m_bFlags2 |= enum_flag2_HasNativeCodeSlot;
}
+ inline BOOL IsJitIntrinsic()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ return (m_bFlags2 & enum_flag2_IsJitIntrinsic) != 0;
+ }
+
+ inline void SetIsJitIntrinsic()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_bFlags2 |= enum_flag2_IsJitIntrinsic;
+ }
+
static const SIZE_T s_ClassificationSizeTable[];
static SIZE_T GetBaseSize(DWORD classification)
@@ -1949,8 +1851,72 @@ public:
REFLECTMETHODREF GetStubMethodInfo();
PrecodeType GetPrecodeType();
+
+
+ // ---------------------------------------------------------------------------------
+ // IL based Code generation pipeline
+ // ---------------------------------------------------------------------------------
+
+#ifndef DACCESS_COMPILE
+public:
+ PCODE PrepareInitialCode();
+ PCODE PrepareCode(NativeCodeVersion codeVersion);
+ PCODE PrepareCode(PrepareCodeConfig* pConfig);
+
+private:
+ PCODE PrepareILBasedCode(PrepareCodeConfig* pConfig);
+ PCODE GetPrecompiledCode(PrepareCodeConfig* pConfig);
+ PCODE GetPrecompiledNgenCode();
+ PCODE GetPrecompiledR2RCode();
+ PCODE GetMulticoreJitCode();
+ COR_ILMETHOD_DECODER* GetAndVerifyILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pIlDecoderMemory);
+ COR_ILMETHOD_DECODER* GetAndVerifyMetadataILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pIlDecoderMemory);
+ COR_ILMETHOD_DECODER* GetAndVerifyNoMetadataILHeader();
+ PCODE JitCompileCode(PrepareCodeConfig* pConfig);
+ PCODE JitCompileCodeLockedEventWrapper(PrepareCodeConfig* pConfig, JitListLockEntry* pEntry);
+ PCODE JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEntry* pLockEntry, ULONG* pSizeOfCode, CORJIT_FLAGS* pFlags);
+#endif // DACCESS_COMPILE
};
+#ifndef DACCESS_COMPILE
+class PrepareCodeConfig
+{
+public:
+ PrepareCodeConfig();
+ PrepareCodeConfig(NativeCodeVersion nativeCodeVersion, BOOL needsMulticoreJitNotification, BOOL mayUsePrecompiledCode);
+ MethodDesc* GetMethodDesc();
+ NativeCodeVersion GetCodeVersion();
+ BOOL NeedsMulticoreJitNotification();
+ BOOL MayUsePrecompiledCode();
+ virtual PCODE IsJitCancellationRequested();
+ virtual BOOL SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse);
+ virtual COR_ILMETHOD* GetILHeader();
+ virtual CORJIT_FLAGS GetJitCompilationFlags();
+
+protected:
+ MethodDesc* m_pMethodDesc;
+ NativeCodeVersion m_nativeCodeVersion;
+ BOOL m_needsMulticoreJitNotification;
+ BOOL m_mayUsePrecompiledCode;
+};
+
+#ifdef FEATURE_CODE_VERSIONING
+class VersionedPrepareCodeConfig : public PrepareCodeConfig
+{
+public:
+ VersionedPrepareCodeConfig();
+ VersionedPrepareCodeConfig(NativeCodeVersion codeVersion);
+ HRESULT FinishConfiguration();
+ virtual PCODE IsJitCancellationRequested();
+ virtual BOOL SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse);
+ virtual COR_ILMETHOD* GetILHeader();
+ virtual CORJIT_FLAGS GetJitCompilationFlags();
+private:
+ ILCodeVersion m_ilCodeVersion;
+};
+#endif // FEATURE_CODE_VERSIONING
+#endif // DACCESS_COMPILE
+
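
Taken together, the declarations above describe the new IL code-generation pipeline: a PrepareCodeConfig carries the policy (which code version, whether to notify multicore JIT, whether precompiled code may be used) and PrepareCode drives the steps. A hedged sketch of a caller, assuming the three-argument constructor declared above; the real driver is the prestub path and may differ in detail:

    // Sketch only: prepare the default (initial) code version of a method.
    PCODE CompileDefaultVersion(MethodDesc* pMD)
    {
        // default code version; request a multicore-JIT notification;
        // allow NGen/R2R precompiled code to satisfy the request
        PrepareCodeConfig config(pMD->GetInitialCodeVersion(), TRUE, TRUE);
        return pMD->PrepareCode(&config); // precompiled code if available, otherwise JIT
    }
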
/******************************************************************/
// A code:MethodDescChunk is a container that holds one or more code:MethodDesc. Logically it is just
diff --git a/src/vm/method.inl b/src/vm/method.inl
index cdd137b84b..dd14900c12 100644
--- a/src/vm/method.inl
+++ b/src/vm/method.inl
@@ -203,11 +203,21 @@ inline BOOL HasTypeEquivalentStructParameters()
}
#endif // FEATURE_TYPEEQUIVALENCE
-inline ReJitManager * MethodDesc::GetReJitManager()
+#ifdef FEATURE_CODE_VERSIONING
+inline CodeVersionManager * MethodDesc::GetCodeVersionManager()
{
LIMITED_METHOD_CONTRACT;
- return GetModule()->GetReJitManager();
+ return GetModule()->GetCodeVersionManager();
}
+#endif
+
+#ifdef FEATURE_TIERED_COMPILATION
+inline CallCounter * MethodDesc::GetCallCounter()
+{
+ LIMITED_METHOD_CONTRACT;
+ return GetModule()->GetCallCounter();
+}
+#endif
#endif // _METHOD_INL_
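
Both new accessors delegate to the owning Module, so the version manager and the call counter are per-module services rather than per-method state. Illustrative usage under the same feature guards (pMD is any MethodDesc):

    #ifdef FEATURE_CODE_VERSIONING
        CodeVersionManager* pCVM = pMD->GetCodeVersionManager(); // per-module manager
    #endif
    #ifdef FEATURE_TIERED_COMPILATION
        CallCounter* pCounter = pMD->GetCallCounter();           // per-module counter
    #endif
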
diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
index face764e0f..0adba6ac4e 100644
--- a/src/vm/methodtable.cpp
+++ b/src/vm/methodtable.cpp
@@ -33,7 +33,6 @@
#include "fieldmarshaler.h"
#include "cgensys.h"
#include "gcheaputilities.h"
-#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
#include "eventtrace.h"
@@ -67,7 +66,6 @@
#include "typeequivalencehash.hpp"
#endif
-#include "listlock.inl"
#include "generics.h"
#include "genericdict.h"
#include "typestring.h"
@@ -593,7 +591,7 @@ void MethodTable::SetIsRestored()
// for details on the race.
//
{
- ReJitPublishMethodTableHolder(this);
+ PublishMethodTableHolder(this);
FastInterlockAnd(EnsureWritablePages(&(GetWriteableDataForWrite()->m_dwFlags)), ~MethodTableWriteableData::enum_flag_Unrestored);
}
#ifndef DACCESS_COMPILE
@@ -1013,7 +1011,7 @@ void MethodTable::SetInterfaceMap(WORD wNumInterfaces, InterfaceInfo_t* iMap)
m_wNumInterfaces = wNumInterfaces;
CONSISTENCY_CHECK(IS_ALIGNED(iMap, sizeof(void*)));
- m_pInterfaceMap = iMap;
+ m_pInterfaceMap.SetValue(iMap);
}
//==========================================================================================
@@ -1236,7 +1234,12 @@ void MethodTable::AddDynamicInterface(MethodTable *pItfMT)
if (TotalNumInterfaces > 0) {
InterfaceInfo_t *pInterfaceMap = GetInterfaceMap();
PREFIX_ASSUME(pInterfaceMap != NULL);
- memcpy(pNewItfMap, pInterfaceMap, TotalNumInterfaces * sizeof(InterfaceInfo_t));
+
+ for (unsigned index = 0; index < TotalNumInterfaces; ++index)
+ {
+ InterfaceInfo_t *pIntInfo = (InterfaceInfo_t *) (pNewItfMap + index);
+ pIntInfo->SetMethodTable((pInterfaceMap + index)->GetMethodTable());
+ }
}
// Add the new interface at the end of the map.
@@ -1246,7 +1249,8 @@ void MethodTable::AddDynamicInterface(MethodTable *pItfMT)
*(((DWORD_PTR *)pNewItfMap) - 1) = NumDynAddedInterfaces + 1;
// Switch the old interface map with the new one.
- VolatileStore(EnsureWritablePages(&m_pInterfaceMap), pNewItfMap);
+ EnsureWritablePages(&m_pInterfaceMap);
+ m_pInterfaceMap.SetValueVolatile(pNewItfMap);
// Log the fact that we leaked the interface vtable map.
#ifdef _DEBUG
@@ -1287,7 +1291,7 @@ void MethodTable::SetupGenericsStaticsInfo(FieldDesc* pStaticFieldDescs)
pInfo->m_DynamicTypeID = (SIZE_T)-1;
}
- pInfo->m_pFieldDescs = pStaticFieldDescs;
+ pInfo->m_pFieldDescs.SetValueMaybeNull(pStaticFieldDescs);
}
#endif // !DACCESS_COMPILE
@@ -1784,7 +1788,7 @@ TypeHandle::CastResult MethodTable::CanCastToClassNoGC(MethodTable *pTargetMT)
if (pMT == pTargetMT)
return TypeHandle::CanCast;
- pMT = MethodTable::GetParentMethodTableOrIndirection(pMT);
+ pMT = MethodTable::GetParentMethodTable(pMT);
} while (pMT);
}
@@ -3147,7 +3151,7 @@ void MethodTable::AllocateRegularStaticBoxes()
OBJECTREF* pStaticSlots = (OBJECTREF*)(pStaticBase + pClassCtorInfoEntry->firstBoxedStaticOffset);
GCPROTECT_BEGININTERIOR(pStaticSlots);
- ArrayDPTR(FixupPointer<PTR_MethodTable>) ppMTs = GetLoaderModule()->GetZapModuleCtorInfo()->
+ ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ppMTs = GetLoaderModule()->GetZapModuleCtorInfo()->
GetGCStaticMTs(pClassCtorInfoEntry->firstBoxedStaticMTIndex);
DWORD numBoxedStatics = pClassCtorInfoEntry->numBoxedStatics;
@@ -4122,7 +4126,7 @@ void ModuleCtorInfo::AddElement(MethodTable *pMethodTable)
{
_ASSERTE(numElements == numLastAllocated);
- MethodTable ** ppOldMTEntries = ppMT;
+ RelativePointer<MethodTable *> *ppOldMTEntries = ppMT;
#ifdef _PREFAST_
#pragma warning(push)
@@ -4133,12 +4137,19 @@ void ModuleCtorInfo::AddElement(MethodTable *pMethodTable)
#pragma warning(pop)
#endif // _PREFAST_
- ppMT = new MethodTable* [numNewAllocated];
+ ppMT = new RelativePointer<MethodTable *> [numNewAllocated];
_ASSERTE(ppMT);
- memcpy(ppMT, ppOldMTEntries, sizeof(MethodTable *) * numLastAllocated);
- memset(ppMT + numLastAllocated, 0, sizeof(MethodTable *) * (numNewAllocated - numLastAllocated));
+ for (unsigned index = 0; index < numLastAllocated; ++index)
+ {
+ ppMT[index].SetValueMaybeNull(ppOldMTEntries[index].GetValueMaybeNull());
+ }
+
+ for (unsigned index = numLastAllocated; index < numNewAllocated; ++index)
+ {
+ ppMT[index].SetValueMaybeNull(NULL);
+ }
delete[] ppOldMTEntries;
@@ -4150,7 +4161,7 @@ void ModuleCtorInfo::AddElement(MethodTable *pMethodTable)
// Note the use of two "parallel" arrays. We do this to keep the workingset smaller since we
// often search (in GetClassCtorInfoIfExists) for a methodtable pointer but never actually find it.
- ppMT[numElements] = pMethodTable;
+ ppMT[numElements].SetValue(pMethodTable);
numElements++;
}
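
The memcpy/memset pair is replaced with per-element copies because a relative pointer encodes the distance from its own address; copying its raw bytes to a different location silently retargets it. A minimal sketch of the idea (the real RelativePointer lives in fixuppointer.h; the NULL-as-zero-delta convention here is an assumption for illustration):

    struct RelativePointerSketch
    {
        TADDR m_delta; // target minus the address of this field; 0 means NULL here

        MethodTable* GetValueMaybeNull() const
        {
            return m_delta == 0 ? NULL : (MethodTable*)((TADDR)this + m_delta);
        }
        void SetValueMaybeNull(MethodTable* pMT)
        {
            m_delta = (pMT == NULL) ? 0 : (TADDR)pMT - (TADDR)this;
        }
        // memcpy'ing this struct changes "(TADDR)this" but not m_delta,
        // so the copied field silently points somewhere else.
    };
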
@@ -4276,16 +4287,32 @@ void MethodTable::Save(DataImage *image, DWORD profilingFlags)
// Dynamic interface maps have an additional DWORD_PTR preceding the InterfaceInfo_t array
if (HasDynamicInterfaceMap())
{
- ZapStoredStructure * pInterfaceMapNode = image->StoreInternedStructure(((DWORD_PTR *)GetInterfaceMap()) - 1,
- GetInterfaceMapSize(),
- DataImage::ITEM_INTERFACE_MAP);
-
+ ZapStoredStructure * pInterfaceMapNode;
+ if (decltype(InterfaceInfo_t::m_pMethodTable)::isRelative)
+ {
+ pInterfaceMapNode = image->StoreStructure(((DWORD_PTR *)GetInterfaceMap()) - 1,
+ GetInterfaceMapSize(),
+ DataImage::ITEM_INTERFACE_MAP);
+ }
+ else
+ {
+ pInterfaceMapNode = image->StoreInternedStructure(((DWORD_PTR *)GetInterfaceMap()) - 1,
+ GetInterfaceMapSize(),
+ DataImage::ITEM_INTERFACE_MAP);
+ }
image->BindPointer(GetInterfaceMap(), pInterfaceMapNode, sizeof(DWORD_PTR));
}
else
#endif // FEATURE_COMINTEROP
{
- image->StoreInternedStructure(GetInterfaceMap(), GetInterfaceMapSize(), DataImage::ITEM_INTERFACE_MAP);
+ if (decltype(InterfaceInfo_t::m_pMethodTable)::isRelative)
+ {
+ image->StoreStructure(GetInterfaceMap(), GetInterfaceMapSize(), DataImage::ITEM_INTERFACE_MAP);
+ }
+ else
+ {
+ image->StoreInternedStructure(GetInterfaceMap(), GetInterfaceMapSize(), DataImage::ITEM_INTERFACE_MAP);
+ }
}
SaveExtraInterfaceInfo(image);
@@ -4302,7 +4329,14 @@ void MethodTable::Save(DataImage *image, DWORD profilingFlags)
ZapStoredStructure * pPerInstInfoNode;
if (CanEagerBindToParentDictionaries(image, NULL))
{
- pPerInstInfoNode = image->StoreInternedStructure((BYTE *)GetPerInstInfo() - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo), DataImage::ITEM_DICTIONARY);
+ if (PerInstInfoElem_t::isRelative)
+ {
+ pPerInstInfoNode = image->StoreStructure((BYTE *)GetPerInstInfo() - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo), DataImage::ITEM_DICTIONARY);
+ }
+ else
+ {
+ pPerInstInfoNode = image->StoreInternedStructure((BYTE *)GetPerInstInfo() - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo), DataImage::ITEM_DICTIONARY);
+ }
}
else
{
@@ -4649,14 +4683,21 @@ BOOL MethodTable::IsWriteable()
// target module. Thus we want to call CanEagerBindToMethodTable
// to check we can hardbind to the containing structure.
static
-void HardBindOrClearDictionaryPointer(DataImage *image, MethodTable *pMT, void * p, SSIZE_T offset)
+void HardBindOrClearDictionaryPointer(DataImage *image, MethodTable *pMT, void * p, SSIZE_T offset, bool isRelative)
{
WRAPPER_NO_CONTRACT;
if (image->CanEagerBindToMethodTable(pMT) &&
image->CanHardBindToZapModule(pMT->GetLoaderModule()))
{
- image->FixupPointerField(p, offset);
+ if (isRelative)
+ {
+ image->FixupRelativePointerField(p, offset);
+ }
+ else
+ {
+ image->FixupPointerField(p, offset);
+ }
}
else
{
@@ -4694,7 +4735,7 @@ void MethodTable::Fixup(DataImage *image)
if (IsCanonicalMethodTable())
{
// Pointer to EEClass
- image->FixupPointerField(this, offsetof(MethodTable, m_pEEClass));
+ image->FixupPlainOrRelativePointerField(this, &MethodTable::m_pEEClass);
}
else
{
@@ -4709,7 +4750,7 @@ void MethodTable::Fixup(DataImage *image)
if (image->CanHardBindToZapModule(pCanonMT->GetLoaderModule()))
{
// Pointer to canonical methodtable
- image->FixupField(this, offsetof(MethodTable, m_pCanonMT), pCanonMT, UNION_METHODTABLE);
+ image->FixupPlainOrRelativeField(this, &MethodTable::m_pCanonMT, pCanonMT, UNION_METHODTABLE);
}
else
{
@@ -4727,18 +4768,28 @@ void MethodTable::Fixup(DataImage *image)
if (pImport != NULL)
{
- image->FixupFieldToNode(this, offsetof(MethodTable, m_pCanonMT), pImport, UNION_INDIRECTION);
+ image->FixupPlainOrRelativeFieldToNode(this, &MethodTable::m_pCanonMT, pImport, UNION_INDIRECTION);
}
}
- image->FixupField(this, offsetof(MethodTable, m_pLoaderModule), pZapModule);
+ image->FixupField(this, offsetof(MethodTable, m_pLoaderModule), pZapModule, 0, IMAGE_REL_BASED_RELPTR);
#ifdef _DEBUG
image->FixupPointerField(this, offsetof(MethodTable, debug_m_szClassName));
#endif // _DEBUG
MethodTable * pParentMT = GetParentMethodTable();
- _ASSERTE(!pNewMT->GetFlag(enum_flag_HasIndirectParent));
+ _ASSERTE(!pNewMT->m_pParentMethodTable.IsIndirectPtrMaybeNull());
+
+ ZapRelocationType relocType;
+ if (decltype(MethodTable::m_pParentMethodTable)::isRelative)
+ {
+ relocType = IMAGE_REL_BASED_RELPTR;
+ }
+ else
+ {
+ relocType = IMAGE_REL_BASED_PTR;
+ }
if (pParentMT != NULL)
{
@@ -4750,7 +4801,8 @@ void MethodTable::Fixup(DataImage *image)
{
if (image->CanHardBindToZapModule(pParentMT->GetLoaderModule()))
{
- image->FixupPointerField(this, offsetof(MethodTable, m_pParentMethodTable));
+ _ASSERTE(!m_pParentMethodTable.IsIndirectPtr());
+ image->FixupField(this, offsetof(MethodTable, m_pParentMethodTable), pParentMT, 0, relocType);
}
else
{
@@ -4786,8 +4838,7 @@ void MethodTable::Fixup(DataImage *image)
if (pImport != NULL)
{
- image->FixupFieldToNode(this, offsetof(MethodTable, m_pParentMethodTable), pImport, -(SSIZE_T)offsetof(MethodTable, m_pParentMethodTable));
- pNewMT->SetFlag(enum_flag_HasIndirectParent);
+ image->FixupFieldToNode(this, offsetof(MethodTable, m_pParentMethodTable), pImport, FIXUP_POINTER_INDIRECTION, relocType);
}
}
@@ -4800,14 +4851,14 @@ void MethodTable::Fixup(DataImage *image)
if (HasInterfaceMap())
{
- image->FixupPointerField(this, offsetof(MethodTable, m_pMultipurposeSlot2));
+ image->FixupPlainOrRelativePointerField(this, &MethodTable::m_pInterfaceMap);
FixupExtraInterfaceInfo(image);
}
_ASSERTE(GetWriteableData());
- image->FixupPointerField(this, offsetof(MethodTable, m_pWriteableData));
- m_pWriteableData->Fixup(image, this, needsRestore);
+ image->FixupPlainOrRelativePointerField(this, &MethodTable::m_pWriteableData);
+ m_pWriteableData.GetValue()->Fixup(image, this, needsRestore);
#ifdef FEATURE_COMINTEROP
if (HasGuidInfo())
@@ -4875,7 +4926,14 @@ void MethodTable::Fixup(DataImage *image)
VtableIndirectionSlotIterator it = IterateVtableIndirectionSlots();
while (it.Next())
{
- image->FixupPointerField(this, it.GetOffsetFromMethodTable());
+ if (VTableIndir_t::isRelative)
+ {
+ image->FixupRelativePointerField(this, it.GetOffsetFromMethodTable());
+ }
+ else
+ {
+ image->FixupPointerField(this, it.GetOffsetFromMethodTable());
+ }
}
}
@@ -4896,7 +4954,7 @@ void MethodTable::Fixup(DataImage *image)
{
// Virtual slots live in chunks pointed to by vtable indirections
- slotBase = (PVOID) GetVtableIndirections()[GetIndexOfVtableIndirection(slotNumber)];
+ slotBase = (PVOID) GetVtableIndirections()[GetIndexOfVtableIndirection(slotNumber)].GetValueMaybeNull();
slotOffset = GetIndexAfterVtableIndirection(slotNumber) * sizeof(PCODE);
}
else if (HasSingleNonVirtualSlot())
@@ -4991,7 +5049,7 @@ void MethodTable::Fixup(DataImage *image)
if (HasPerInstInfo())
{
// Fixup the pointer to the per-inst table
- image->FixupPointerField(this, offsetof(MethodTable, m_pPerInstInfo));
+ image->FixupPlainOrRelativePointerField(this, &MethodTable::m_pPerInstInfo);
for (MethodTable *pChain = this; pChain != NULL; pChain = pChain->GetParentMethodTable())
{
@@ -5004,10 +5062,23 @@ void MethodTable::Fixup(DataImage *image)
// We special-case the dictionary for this method table because we must always
// hard bind to it even if it's not in its preferred zap module
+ size_t sizeDict = sizeof(PerInstInfoElem_t);
+
if (pChain == this)
- image->FixupPointerField(GetPerInstInfo(), dictNum * sizeof(Dictionary *));
+ {
+ if (PerInstInfoElem_t::isRelative)
+ {
+ image->FixupRelativePointerField(GetPerInstInfo(), dictNum * sizeDict);
+ }
+ else
+ {
+ image->FixupPointerField(GetPerInstInfo(), dictNum * sizeDict);
+ }
+ }
else
- HardBindOrClearDictionaryPointer(image, pChain, GetPerInstInfo(), dictNum * sizeof(Dictionary *));
+ {
+ HardBindOrClearDictionaryPointer(image, pChain, GetPerInstInfo(), dictNum * sizeDict, PerInstInfoElem_t::isRelative);
+ }
}
}
}
@@ -5036,7 +5107,7 @@ void MethodTable::Fixup(DataImage *image)
{
GenericsStaticsInfo *pInfo = GetGenericsStaticsInfo();
- image->FixupPointerField(this, (BYTE *)&pInfo->m_pFieldDescs - (BYTE *)this);
+ image->FixupRelativePointerField(this, (BYTE *)&pInfo->m_pFieldDescs - (BYTE *)this);
if (!isCanonical)
{
for (DWORD i = 0; i < GetClass()->GetNumStaticFields(); i++)
@@ -5048,12 +5119,12 @@ void MethodTable::Fixup(DataImage *image)
if (NeedsCrossModuleGenericsStaticsInfo())
{
- MethodTableWriteableData * pNewWriteableData = (MethodTableWriteableData *)image->GetImagePointer(m_pWriteableData);
+ MethodTableWriteableData * pNewWriteableData = (MethodTableWriteableData *)image->GetImagePointer(m_pWriteableData.GetValue());
CrossModuleGenericsStaticsInfo * pNewCrossModuleGenericsStaticsInfo = pNewWriteableData->GetCrossModuleGenericsStaticsInfo();
pNewCrossModuleGenericsStaticsInfo->m_DynamicTypeID = pInfo->m_DynamicTypeID;
- image->ZeroPointerField(m_pWriteableData, sizeof(MethodTableWriteableData) + offsetof(CrossModuleGenericsStaticsInfo, m_pModuleForStatics));
+ image->ZeroPointerField(m_pWriteableData.GetValue(), sizeof(MethodTableWriteableData) + offsetof(CrossModuleGenericsStaticsInfo, m_pModuleForStatics));
pNewMT->SetFlag(enum_flag_StaticsMask_IfGenericsThenCrossModule);
}
@@ -5158,7 +5229,7 @@ void MethodTable::CheckRestore()
BOOL SatisfiesClassConstraints(TypeHandle instanceTypeHnd, TypeHandle typicalTypeHnd,
const InstantiationContext *pInstContext);
-static VOID DoAccessibilityCheck(MethodTable *pAskingMT, MethodTable *pTargetMT, UINT resIDWhy, BOOL checkTargetTypeTransparency)
+static VOID DoAccessibilityCheck(MethodTable *pAskingMT, MethodTable *pTargetMT, UINT resIDWhy)
{
CONTRACTL
{
@@ -5172,8 +5243,7 @@ static VOID DoAccessibilityCheck(MethodTable *pAskingMT, MethodTable *pTargetMT,
if (!ClassLoader::CanAccessClass(&accessContext,
pTargetMT, //the desired class
pTargetMT->GetAssembly(), //the desired class's assembly
- *AccessCheckOptions::s_pNormalAccessChecks,
- checkTargetTypeTransparency
+ *AccessCheckOptions::s_pNormalAccessChecks
))
{
SString displayName;
@@ -5222,7 +5292,7 @@ VOID DoAccessibilityCheckForConstraint(MethodTable *pAskingMT, TypeHandle thCons
}
else
{
- DoAccessibilityCheck(pAskingMT, thConstraint.GetMethodTable(), resIDWhy, FALSE);
+ DoAccessibilityCheck(pAskingMT, thConstraint.GetMethodTable(), resIDWhy);
}
}
@@ -5586,7 +5656,7 @@ void MethodTable::DoFullyLoad(Generics::RecursionGraph * const pVisited, const
// A transparenct type should not be allowed to derive from a critical type.
// However since this has never been enforced before we have many classes that
// violate this rule. Enforcing it now will be a breaking change.
- DoAccessibilityCheck(this, pParentMT, E_ACCESSDENIED, /* checkTargetTypeTransparency*/ FALSE);
+ DoAccessibilityCheck(this, pParentMT, E_ACCESSDENIED);
}
}
}
@@ -5605,7 +5675,7 @@ void MethodTable::DoFullyLoad(Generics::RecursionGraph * const pVisited, const
// A transparenct type should not be allowed to implement a critical interface.
// However since this has never been enforced before we have many classes that
// violate this rule. Enforcing it now will be a breaking change.
- DoAccessibilityCheck(this, it.GetInterface(), IDS_CLASSLOAD_INTERFACE_NO_ACCESS, /* checkTargetTypeTransparency*/ FALSE);
+ DoAccessibilityCheck(this, it.GetInterface(), IDS_CLASSLOAD_INTERFACE_NO_ACCESS);
}
}
}
@@ -5644,7 +5714,7 @@ void MethodTable::DoFullyLoad(Generics::RecursionGraph * const pVisited, const
if (fNeedAccessChecks)
{
- DoAccessibilityCheck(this, th.GetMethodTable(), E_ACCESSDENIED, FALSE);
+ DoAccessibilityCheck(this, th.GetMethodTable(), E_ACCESSDENIED);
}
}
@@ -5953,9 +6023,9 @@ void MethodTable::DoRestoreTypeKey()
// If we have an indirection cell then restore the m_pCanonMT and its module pointer
//
- if (union_getLowBits(m_pCanonMT) == UNION_INDIRECTION)
+ if (union_getLowBits(m_pCanonMT.GetValue()) == UNION_INDIRECTION)
{
- Module::RestoreMethodTablePointerRaw((MethodTable **)(union_getPointer(m_pCanonMT)),
+ Module::RestoreMethodTablePointerRaw((MethodTable **)(union_getPointer(m_pCanonMT.GetValue())),
GetLoaderModule(), CLASS_LOAD_UNRESTORED);
}
@@ -6031,7 +6101,7 @@ void MethodTable::Restore()
//
// Restore parent method table
//
- Module::RestoreMethodTablePointerRaw(GetParentMethodTablePtr(), GetLoaderModule(), CLASS_LOAD_APPROXPARENTS);
+ Module::RestoreMethodTablePointer(&m_pParentMethodTable, GetLoaderModule(), CLASS_LOAD_APPROXPARENTS);
//
// Restore interface classes
@@ -6192,7 +6262,7 @@ BOOL MethodTable::IsWinRTObjectType()
//==========================================================================================
// Return a pointer to the dictionary for an instantiated type
// Return NULL if not instantiated
-Dictionary* MethodTable::GetDictionary()
+PTR_Dictionary MethodTable::GetDictionary()
{
LIMITED_METHOD_DAC_CONTRACT;
@@ -6200,7 +6270,8 @@ Dictionary* MethodTable::GetDictionary()
{
// The instantiation for this class is stored in the type slots table
// *after* any inherited slots
- return GetPerInstInfo()[GetNumDicts()-1];
+ TADDR base = dac_cast<TADDR>(&(GetPerInstInfo()[GetNumDicts()-1]));
+ return PerInstInfoElem_t::GetValueMaybeNullAtPtr(base);
}
else
{
@@ -6217,7 +6288,8 @@ Instantiation MethodTable::GetInstantiation()
if (HasInstantiation())
{
PTR_GenericsDictInfo pDictInfo = GetGenericsDictInfo();
- return Instantiation(GetPerInstInfo()[pDictInfo->m_wNumDicts-1]->GetInstantiation(), pDictInfo->m_wNumTyPars);
+ TADDR base = dac_cast<TADDR>(&(GetPerInstInfo()[pDictInfo->m_wNumDicts-1]));
+ return Instantiation(PerInstInfoElem_t::GetValueMaybeNullAtPtr(base)->GetInstantiation(), pDictInfo->m_wNumTyPars);
}
else
{
@@ -7889,7 +7961,7 @@ BOOL MethodTable::SanityCheck()
// strings have component size2, all other non-arrays should have 0
_ASSERTE((GetComponentSize() <= 2) || IsArray());
- if (m_pEEClass == NULL)
+ if (m_pEEClass.IsNull())
{
if (IsAsyncPinType())
{
@@ -8029,7 +8101,7 @@ ClassCtorInfoEntry* MethodTable::GetClassCtorInfoIfExists()
if (HasBoxedRegularStatics())
{
ModuleCtorInfo *pModuleCtorInfo = GetZapModule()->GetZapModuleCtorInfo();
- DPTR(PTR_MethodTable) ppMT = pModuleCtorInfo->ppMT;
+ DPTR(RelativePointer<PTR_MethodTable>) ppMT = pModuleCtorInfo->ppMT;
PTR_DWORD hotHashOffsets = pModuleCtorInfo->hotHashOffsets;
PTR_DWORD coldHashOffsets = pModuleCtorInfo->coldHashOffsets;
@@ -8040,8 +8112,8 @@ ClassCtorInfoEntry* MethodTable::GetClassCtorInfoIfExists()
for (DWORD i = hotHashOffsets[hash]; i != hotHashOffsets[hash + 1]; i++)
{
- _ASSERTE(ppMT[i]);
- if (dac_cast<TADDR>(ppMT[i]) == dac_cast<TADDR>(this))
+ _ASSERTE(!ppMT[i].IsNull());
+ if (dac_cast<TADDR>(pModuleCtorInfo->GetMT(i)) == dac_cast<TADDR>(this))
{
return pModuleCtorInfo->cctorInfoHot + i;
}
@@ -8055,8 +8127,8 @@ ClassCtorInfoEntry* MethodTable::GetClassCtorInfoIfExists()
for (DWORD i = coldHashOffsets[hash]; i != coldHashOffsets[hash + 1]; i++)
{
- _ASSERTE(ppMT[i]);
- if (dac_cast<TADDR>(ppMT[i]) == dac_cast<TADDR>(this))
+ _ASSERTE(!ppMT[i].IsNull());
+ if (dac_cast<TADDR>(pModuleCtorInfo->GetMT(i)) == dac_cast<TADDR>(this))
{
return pModuleCtorInfo->cctorInfoCold + (i - pModuleCtorInfo->numElementsHot);
}
@@ -8080,13 +8152,8 @@ BOOL MethodTable::IsParentMethodTablePointerValid()
if (!GetWriteableData_NoLogging()->IsParentMethodTablePointerValid())
return FALSE;
- if (!GetFlag(enum_flag_HasIndirectParent))
- {
- return TRUE;
- }
- TADDR pMT;
- pMT = *PTR_TADDR(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable));
- return !CORCOMPILE_IS_POINTER_TAGGED(pMT);
+ TADDR base = dac_cast<TADDR>(this) + offsetof(MethodTable, m_pParentMethodTable);
+ return !m_pParentMethodTable.IsTagged(base);
}
#endif
@@ -9461,9 +9528,10 @@ MethodTable::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
DacEnumMemoryRegion(dac_cast<TADDR>(it.GetIndirectionSlot()), it.GetSize());
}
- if (m_pWriteableData.IsValid())
+ PTR_MethodTableWriteableData pWriteableData = ReadPointer(this, &MethodTable::m_pWriteableData);
+ if (pWriteableData.IsValid())
{
- m_pWriteableData.EnumMem();
+ pWriteableData.EnumMem();
}
if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
@@ -9651,24 +9719,21 @@ void MethodTable::SetSlot(UINT32 slotNumber, PCODE slotCode)
if (!IsCanonicalMethodTable())
{
- if (GetVtableIndirections()[indirectionIndex] == GetCanonicalMethodTable()->GetVtableIndirections()[indirectionIndex])
+ if (GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() == GetCanonicalMethodTable()->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
fSharedVtableChunk = TRUE;
}
if (slotNumber < GetNumParentVirtuals())
{
- if (GetVtableIndirections()[indirectionIndex] == GetParentMethodTable()->GetVtableIndirections()[indirectionIndex])
+ if (GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() == GetParentMethodTable()->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
fSharedVtableChunk = TRUE;
}
if (fSharedVtableChunk)
{
MethodDesc* pMD = GetMethodDescForSlotAddress(slotCode);
-#ifndef FEATURE_INTERPRETER
- // TBD: Make this take a "stable" debug arg, determining whether to make these assertions.
_ASSERTE(pMD->HasStableEntryPoint());
_ASSERTE(pMD->GetStableEntryPoint() == slotCode);
-#endif // FEATURE_INTERPRETER
}
}
#endif
@@ -9929,8 +9994,6 @@ bool MethodTable::ClassRequiresUnmanagedCodeCheck()
return false;
}
-#endif // !DACCESS_COMPILE
-
BOOL MethodTable::Validate()
@@ -9940,13 +10003,14 @@ BOOL MethodTable::Validate()
ASSERT_AND_CHECK(SanityCheck());
#ifdef _DEBUG
- if (m_pWriteableData == NULL)
+ if (m_pWriteableData.IsNull())
{
_ASSERTE(IsAsyncPinType());
return TRUE;
}
- DWORD dwLastVerifiedGCCnt = m_pWriteableData->m_dwLastVerifedGCCnt;
+ MethodTableWriteableData *pWriteableData = m_pWriteableData.GetValue();
+ DWORD dwLastVerifiedGCCnt = pWriteableData->m_dwLastVerifedGCCnt;
// Here we used to assert that (dwLastVerifiedGCCnt <= GCHeapUtilities::GetGCHeap()->GetGcCount()) but
// this is no longer true because with background gc. Since the purpose of having
// m_dwLastVerifedGCCnt is just to only verify the same method table once for each GC
@@ -9977,13 +10041,15 @@ BOOL MethodTable::Validate()
#ifdef _DEBUG
// It is not a fatal error to fail the update the counter. We will run slower and retry next time,
// but the system will function properly.
- if (EnsureWritablePagesNoThrow(m_pWriteableData, sizeof(MethodTableWriteableData)))
- m_pWriteableData->m_dwLastVerifedGCCnt = GCHeapUtilities::GetGCHeap()->GetGcCount();
+ if (EnsureWritablePagesNoThrow(pWriteableData, sizeof(MethodTableWriteableData)))
+ pWriteableData->m_dwLastVerifedGCCnt = GCHeapUtilities::GetGCHeap()->GetGcCount();
#endif //_DEBUG
return TRUE;
}
+#endif // !DACCESS_COMPILE
+
NOINLINE BYTE *MethodTable::GetLoaderAllocatorObjectForGC()
{
WRAPPER_NO_CONTRACT;
diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
index 118a883b6e..60059675e9 100644
--- a/src/vm/methodtable.h
+++ b/src/vm/methodtable.h
@@ -110,25 +110,40 @@ struct InterfaceInfo_t
friend class NativeImageDumper;
#endif
- FixupPointer<PTR_MethodTable> m_pMethodTable; // Method table of the interface
+ // Method table of the interface
+#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
+ RelativeFixupPointer<PTR_MethodTable> m_pMethodTable;
+#else
+ FixupPointer<PTR_MethodTable> m_pMethodTable;
+#endif
public:
FORCEINLINE PTR_MethodTable GetMethodTable()
{
LIMITED_METHOD_CONTRACT;
- return m_pMethodTable.GetValue();
+ return ReadPointerMaybeNull(this, &InterfaceInfo_t::m_pMethodTable);
}
#ifndef DACCESS_COMPILE
void SetMethodTable(MethodTable * pMT)
{
LIMITED_METHOD_CONTRACT;
- m_pMethodTable.SetValue(pMT);
+ m_pMethodTable.SetValueMaybeNull(pMT);
}
// Get approximate method table. This is used by the type loader before the type is fully loaded.
PTR_MethodTable GetApproxMethodTable(Module * pContainingModule);
-#endif
+#endif // !DACCESS_COMPILE
+
+#ifndef DACCESS_COMPILE
+ InterfaceInfo_t(InterfaceInfo_t &right)
+ {
+ m_pMethodTable.SetValueMaybeNull(right.m_pMethodTable.GetValueMaybeNull());
+ }
+#else // !DACCESS_COMPILE
+private:
+ InterfaceInfo_t(InterfaceInfo_t &right);
+#endif // !DACCESS_COMPILE
}; // struct InterfaceInfo_t
typedef DPTR(InterfaceInfo_t) PTR_InterfaceInfo;
@@ -247,7 +262,7 @@ typedef DPTR(GenericsDictInfo) PTR_GenericsDictInfo;
struct GenericsStaticsInfo
{
// Pointer to field descs for statics
- PTR_FieldDesc m_pFieldDescs;
+ RelativePointer<PTR_FieldDesc> m_pFieldDescs;
// Method table ID for statics
SIZE_T m_DynamicTypeID;
@@ -390,6 +405,9 @@ struct MethodTableWriteableData
enum_flag_SkipWinRTOverride = 0x00000100, // No WinRT override is needed
+ enum_flag_CanCompareBitsOrUseFastGetHashCode = 0x00000200, // Whether any field type or sub-field type overrides Equals or GetHashCode
+ enum_flag_HasCheckedCanCompareBitsOrUseFastGetHashCode = 0x00000400, // Whether we have checked for overridden Equals or GetHashCode
+
#ifdef FEATURE_PREJIT
// These flags are used only at ngen time. We store them here since
// we are running out of available flags in MethodTable. They may eventually
@@ -1235,6 +1253,41 @@ public:
WRAPPER_NO_CONTRACT;
FastInterlockOr(EnsureWritablePages(&GetWriteableDataForWrite_NoLogging()->m_dwFlags), MethodTableWriteableData::enum_flag_SkipWinRTOverride);
}
+
+ inline BOOL CanCompareBitsOrUseFastGetHashCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (GetWriteableData_NoLogging()->m_dwFlags & MethodTableWriteableData::enum_flag_CanCompareBitsOrUseFastGetHashCode);
+ }
+
+ // If canCompare is true, this method ensures that the
+ // enum_flag_HasCheckedCanCompareBitsOrUseFastGetHashCode and enum_flag_CanCompareBitsOrUseFastGetHashCode flags are set in a single atomic operation.
+ inline void SetCanCompareBitsOrUseFastGetHashCode(BOOL canCompare)
+ {
+ WRAPPER_NO_CONTRACT;
+ if (canCompare)
+ {
+ // Set checked and canCompare flags in one interlocked operation.
+ FastInterlockOr(EnsureWritablePages(&GetWriteableDataForWrite_NoLogging()->m_dwFlags),
+ MethodTableWriteableData::enum_flag_HasCheckedCanCompareBitsOrUseFastGetHashCode | MethodTableWriteableData::enum_flag_CanCompareBitsOrUseFastGetHashCode);
+ }
+ else
+ {
+ SetHasCheckedCanCompareBitsOrUseFastGetHashCode();
+ }
+ }
+
+ inline BOOL HasCheckedCanCompareBitsOrUseFastGetHashCode()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return (GetWriteableData_NoLogging()->m_dwFlags & MethodTableWriteableData::enum_flag_HasCheckedCanCompareBitsOrUseFastGetHashCode);
+ }
+
+ inline void SetHasCheckedCanCompareBitsOrUseFastGetHashCode()
+ {
+ WRAPPER_NO_CONTRACT;
+ FastInterlockOr(EnsureWritablePages(&GetWriteableDataForWrite_NoLogging()->m_dwFlags), MethodTableWriteableData::enum_flag_HasCheckedCanCompareBitsOrUseFastGetHashCode);
+ }
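
The expected caller pattern for this pair of flags: test the "checked" bit first, compute the answer once, then publish both bits with a single interlocked OR so racing threads never observe a result bit without its checked bit. A hypothetical sketch (ComputeCanCompareBits stands in for the real field analysis, which lives elsewhere in this patch):

    // Sketch only: check-then-compute-then-publish pattern for the two flags.
    BOOL CanCompareBitsSketch(MethodTable* pMT)
    {
        if (!pMT->HasCheckedCanCompareBitsOrUseFastGetHashCode())
        {
            BOOL canCompare = ComputeCanCompareBits(pMT); // hypothetical helper
            pMT->SetCanCompareBitsOrUseFastGetHashCode(canCompare);
        }
        return pMT->CanCompareBitsOrUseFastGetHashCode();
    }
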
inline void SetIsDependenciesLoaded()
{
@@ -1498,7 +1551,10 @@ public:
CONSISTENCY_CHECK(slotNum < GetNumVirtuals());
// Virtual slots live in chunks pointed to by vtable indirections
- return *(GetVtableIndirections()[GetIndexOfVtableIndirection(slotNum)] + GetIndexAfterVtableIndirection(slotNum));
+
+ DWORD index = GetIndexOfVtableIndirection(slotNum);
+ TADDR base = dac_cast<TADDR>(&(GetVtableIndirections()[index]));
+ return *(VTableIndir_t::GetValueMaybeNullAtPtr(base) + GetIndexAfterVtableIndirection(slotNum));
}
PTR_PCODE GetSlotPtrRaw(UINT32 slotNum)
@@ -1510,7 +1566,9 @@ public:
if (slotNum < GetNumVirtuals())
{
// Virtual slots live in chunks pointed to by vtable indirections
- return GetVtableIndirections()[GetIndexOfVtableIndirection(slotNum)] + GetIndexAfterVtableIndirection(slotNum);
+ DWORD index = GetIndexOfVtableIndirection(slotNum);
+ TADDR base = dac_cast<TADDR>(&(GetVtableIndirections()[index]));
+ return VTableIndir_t::GetValueMaybeNullAtPtr(base) + GetIndexAfterVtableIndirection(slotNum);
}
else if (HasSingleNonVirtualSlot())
{
@@ -1594,12 +1652,18 @@ public:
#define VTABLE_SLOTS_PER_CHUNK 8
#define VTABLE_SLOTS_PER_CHUNK_LOG2 3
+#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
+ typedef RelativePointer<PTR_PCODE> VTableIndir_t;
+#else
+ typedef PlainPointer<PTR_PCODE> VTableIndir_t;
+#endif
+
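
With VTableIndir_t, a vtable indirection slot can no longer be dereferenced directly: a RelativePointer's value depends on the slot's own address, so reads go through GetValueMaybeNullAtPtr with that address as the base (a PlainPointer simply ignores it). An illustrative helper mirroring the rewritten accessors above:

    // Sketch only: decode one vtable indirection slot into a chunk pointer.
    PTR_PCODE GetVtableChunk(MethodTable* pMT, DWORD indirectionIndex)
    {
        // base = address of the slot itself, needed to decode a relative pointer
        TADDR base = dac_cast<TADDR>(&pMT->GetVtableIndirections()[indirectionIndex]);
        return MethodTable::VTableIndir_t::GetValueMaybeNullAtPtr(base);
    }
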
static DWORD GetIndexOfVtableIndirection(DWORD slotNum);
static DWORD GetStartSlotForVtableIndirection(UINT32 indirectionIndex, DWORD wNumVirtuals);
static DWORD GetEndSlotForVtableIndirection(UINT32 indirectionIndex, DWORD wNumVirtuals);
static UINT32 GetIndexAfterVtableIndirection(UINT32 slotNum);
static DWORD GetNumVtableIndirections(DWORD wNumVirtuals);
- PTR_PTR_PCODE GetVtableIndirections();
+ DPTR(VTableIndir_t) GetVtableIndirections();
DWORD GetNumVtableIndirections();
class VtableIndirectionSlotIterator
@@ -1607,7 +1671,7 @@ public:
friend class MethodTable;
private:
- PTR_PTR_PCODE m_pSlot;
+ DPTR(VTableIndir_t) m_pSlot;
DWORD m_i;
DWORD m_count;
PTR_MethodTable m_pMT;
@@ -2004,8 +2068,18 @@ public:
LIMITED_METHOD_CONTRACT;
SetFlag(enum_flag_IsHFA);
}
+#else // !FEATURE_HFA
+ bool IsHFA();
#endif // FEATURE_HFA
+ // Get the HFA type. This is supported both with FEATURE_HFA, in which case it
+ // depends on the cached bit on the class, and without it, in which case it is
+ // recomputed on each invocation.
+ CorElementType GetHFAType();
+ // The managed and unmanaged HFA type can differ for types with layout. The following two methods return the unmanaged HFA type.
+ bool IsNativeHFA();
+ CorElementType GetNativeHFAType();
+
#if defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
inline bool IsRegPassedStruct()
{
@@ -2020,15 +2094,6 @@ public:
}
#endif // defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
-#ifdef FEATURE_HFA
-
- CorElementType GetHFAType();
-
- // The managed and unmanaged HFA type can differ for types with layout. The following two methods return the unmanaged HFA type.
- bool IsNativeHFA();
- CorElementType GetNativeHFAType();
-#endif // FEATURE_HFA
-
#ifdef FEATURE_64BIT_ALIGNMENT
// Returns true iff the native view of this type requires 64-bit aligment.
bool NativeRequiresAlign8();
@@ -2100,6 +2165,12 @@ public:
// THE METHOD TABLE PARENT (SUPERCLASS/BASE CLASS)
//
+#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
+ typedef RelativeFixupPointer<PTR_MethodTable> ParentMT_t;
+#else
+ typedef PlainPointer<PTR_MethodTable> ParentMT_t;
+#endif
+
BOOL HasApproxParent()
{
LIMITED_METHOD_DAC_CONTRACT;
@@ -2118,32 +2189,24 @@ public:
LIMITED_METHOD_DAC_CONTRACT;
PRECONDITION(IsParentMethodTablePointerValid());
-
- TADDR pMT = m_pParentMethodTable;
-#ifdef FEATURE_PREJIT
- if (GetFlag(enum_flag_HasIndirectParent))
- pMT = *PTR_TADDR(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable));
-#endif
- return PTR_MethodTable(pMT);
+ return ReadPointerMaybeNull(this, &MethodTable::m_pParentMethodTable);
}
- inline static PTR_VOID GetParentMethodTableOrIndirection(PTR_VOID pMT)
+ inline static PTR_VOID GetParentMethodTable(PTR_VOID pMT)
{
- WRAPPER_NO_CONTRACT;
- return PTR_VOID(*PTR_TADDR(dac_cast<TADDR>(pMT) + offsetof(MethodTable, m_pParentMethodTable)));
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PTR_MethodTable pMethodTable = dac_cast<PTR_MethodTable>(pMT);
+ return pMethodTable->GetParentMethodTable();
}
- inline MethodTable ** GetParentMethodTablePtr()
+#ifndef DACCESS_COMPILE
+ inline ParentMT_t * GetParentMethodTablePlainOrRelativePointerPtr()
{
- WRAPPER_NO_CONTRACT;
-
-#ifdef FEATURE_PREJIT
- return GetFlag(enum_flag_HasIndirectParent) ?
- (MethodTable **)(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable)) :(MethodTable **)&m_pParentMethodTable;
-#else
- return (MethodTable **)&m_pParentMethodTable;
-#endif
+ LIMITED_METHOD_CONTRACT;
+ return &m_pParentMethodTable;
}
+#endif // !DACCESS_COMPILE
// Is the parent method table pointer equal to the given argument?
BOOL ParentEquals(PTR_MethodTable pMT)
@@ -2162,8 +2225,8 @@ public:
void SetParentMethodTable (MethodTable *pParentMethodTable)
{
LIMITED_METHOD_CONTRACT;
- PRECONDITION(!GetFlag(enum_flag_HasIndirectParent));
- m_pParentMethodTable = (TADDR)pParentMethodTable;
+ PRECONDITION(!m_pParentMethodTable.IsIndirectPtrMaybeNull());
+ m_pParentMethodTable.SetValueMaybeNull(pParentMethodTable);
#ifdef _DEBUG
GetWriteableDataForWrite_NoLogging()->SetParentMethodTablePointerValid();
#endif
@@ -2209,12 +2272,12 @@ public:
inline void SetClass(EEClass *pClass)
{
LIMITED_METHOD_CONTRACT;
- m_pEEClass = pClass;
+ m_pEEClass.SetValue(pClass);
}
inline void SetCanonicalMethodTable(MethodTable * pMT)
{
- m_pCanonMT = (TADDR)pMT | MethodTable::UNION_METHODTABLE;
+ m_pCanonMT.SetValue((TADDR)pMT | MethodTable::UNION_METHODTABLE);
}
#endif
@@ -2647,7 +2710,7 @@ public:
{
WRAPPER_NO_CONTRACT;
_ASSERTE(HasGenericsStaticsInfo());
- return GetGenericsStaticsInfo()->m_pFieldDescs;
+ return ReadPointerMaybeNull((GenericsStaticsInfo *)GetGenericsStaticsInfo(), &GenericsStaticsInfo::m_pFieldDescs);
}
BOOL HasCrossModuleGenericStaticsInfo()
@@ -3000,12 +3063,20 @@ public:
// must have a dictionary entry. On the other hand, for instantiations shared with Dict<string,double> the opposite holds.
//
+#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
+ typedef RelativePointer<PTR_Dictionary> PerInstInfoElem_t;
+ typedef RelativePointer<DPTR(PerInstInfoElem_t)> PerInstInfo_t;
+#else
+ typedef PlainPointer<PTR_Dictionary> PerInstInfoElem_t;
+ typedef PlainPointer<DPTR(PerInstInfoElem_t)> PerInstInfo_t;
+#endif
+
// Return a pointer to the per-instantiation information. See field itself for comments.
- DPTR(PTR_Dictionary) GetPerInstInfo()
+ DPTR(PerInstInfoElem_t) GetPerInstInfo()
{
LIMITED_METHOD_DAC_CONTRACT;
_ASSERTE(HasPerInstInfo());
- return dac_cast<DPTR(PTR_Dictionary)>(m_pMultipurposeSlot1);
+ return ReadPointer(this, &MethodTable::m_pPerInstInfo);
}
BOOL HasPerInstInfo()
{
@@ -3013,15 +3084,20 @@ public:
return GetFlag(enum_flag_HasPerInstInfo) && !IsArray();
}
#ifndef DACCESS_COMPILE
+ static inline bool IsPerInstInfoRelative()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return decltype(m_pPerInstInfo)::isRelative;
+ }
static inline DWORD GetOffsetOfPerInstInfo()
{
LIMITED_METHOD_CONTRACT;
return offsetof(MethodTable, m_pPerInstInfo);
}
- void SetPerInstInfo(Dictionary** pPerInstInfo)
+ void SetPerInstInfo(PerInstInfoElem_t *pPerInstInfo)
{
LIMITED_METHOD_CONTRACT;
- m_pPerInstInfo = pPerInstInfo;
+ m_pPerInstInfo.SetValue(pPerInstInfo);
}
void SetDictInfo(WORD numDicts, WORD numTyPars)
{
@@ -3041,7 +3117,7 @@ public:
// Get a pointer to the dictionary for this instantiated type
// (The instantiation is stored in the initial slots of the dictionary)
// If not instantiated, return NULL
- Dictionary* GetDictionary();
+ PTR_Dictionary GetDictionary();
#ifdef FEATURE_PREJIT
//
@@ -3127,36 +3203,39 @@ public:
// Private part of MethodTable
// ------------------------------------------------------------------
+#ifndef DACCESS_COMPILE
inline void SetWriteableData(PTR_MethodTableWriteableData pMTWriteableData)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(pMTWriteableData);
- m_pWriteableData = pMTWriteableData;
+ m_pWriteableData.SetValue(pMTWriteableData);
}
-
+#endif
+
inline PTR_Const_MethodTableWriteableData GetWriteableData() const
{
LIMITED_METHOD_DAC_CONTRACT;
g_IBCLogger.LogMethodTableWriteableDataAccess(this);
- return m_pWriteableData;
+ return GetWriteableData_NoLogging();
}
inline PTR_Const_MethodTableWriteableData GetWriteableData_NoLogging() const
{
LIMITED_METHOD_DAC_CONTRACT;
- return m_pWriteableData;
+ return ReadPointer(this, &MethodTable::m_pWriteableData);
}
inline PTR_MethodTableWriteableData GetWriteableDataForWrite()
{
- LIMITED_METHOD_CONTRACT;
+ LIMITED_METHOD_DAC_CONTRACT;
g_IBCLogger.LogMethodTableWriteableDataWriteAccess(this);
- return m_pWriteableData;
+ return GetWriteableDataForWrite_NoLogging();
}
inline PTR_MethodTableWriteableData GetWriteableDataForWrite_NoLogging()
{
- return m_pWriteableData;
+ LIMITED_METHOD_DAC_CONTRACT;
+ return ReadPointer(this, &MethodTable::m_pWriteableData);
}
//-------------------------------------------------------------------
@@ -4050,11 +4129,15 @@ private:
// if enum_flag_HasIndirectParent is set. The indirection is offset by offsetof(MethodTable, m_pParentMethodTable).
// It allows casting helpers to walk the parent chain naturally. Casting helpers do not need the explicit check
// for enum_flag_HasIndirectParentMethodTable.
- TADDR m_pParentMethodTable;
+ ParentMT_t m_pParentMethodTable;
- PTR_Module m_pLoaderModule; // LoaderModule. It is equal to the ZapModule in ngened images
+ RelativePointer<PTR_Module> m_pLoaderModule; // LoaderModule. It is equal to the ZapModule in ngened images
- PTR_MethodTableWriteableData m_pWriteableData;
+#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
+ RelativePointer<PTR_MethodTableWriteableData> m_pWriteableData;
+#else
+ PlainPointer<PTR_MethodTableWriteableData> m_pWriteableData;
+#endif
// The value of lowest two bits describe what the union contains
enum LowBits {
@@ -4066,8 +4149,13 @@ private:
static const TADDR UNION_MASK = 3;
union {
- EEClass * m_pEEClass;
- TADDR m_pCanonMT;
+#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
+ RelativePointer<DPTR(EEClass)> m_pEEClass;
+ RelativePointer<TADDR> m_pCanonMT;
+#else
+ PlainPointer<DPTR(EEClass)> m_pEEClass;
+ PlainPointer<TADDR> m_pCanonMT;
+#endif
};
__forceinline static LowBits union_getLowBits(TADDR pCanonMT)
@@ -4089,14 +4177,18 @@ private:
union
{
- PTR_Dictionary * m_pPerInstInfo;
- TADDR m_ElementTypeHnd;
- TADDR m_pMultipurposeSlot1;
+ PerInstInfo_t m_pPerInstInfo;
+ TADDR m_ElementTypeHnd;
+ TADDR m_pMultipurposeSlot1;
};
public:
union
{
- InterfaceInfo_t * m_pInterfaceMap;
+#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
+ RelativePointer<PTR_InterfaceInfo> m_pInterfaceMap;
+#else
+ PlainPointer<PTR_InterfaceInfo> m_pInterfaceMap;
+#endif
TADDR m_pMultipurposeSlot2;
};
diff --git a/src/vm/methodtable.inl b/src/vm/methodtable.inl
index a8a4d2301c..4fa81c931b 100644
--- a/src/vm/methodtable.inl
+++ b/src/vm/methodtable.inl
@@ -23,24 +23,26 @@ inline PTR_EEClass MethodTable::GetClass_NoLogging()
{
LIMITED_METHOD_DAC_CONTRACT;
+ TADDR addr = ReadPointer(this, &MethodTable::m_pCanonMT);
+
#ifdef _DEBUG
- LowBits lowBits = union_getLowBits(m_pCanonMT);
+ LowBits lowBits = union_getLowBits(addr);
if (lowBits == UNION_EECLASS)
{
- return PTR_EEClass(m_pCanonMT);
+ return PTR_EEClass(addr);
}
else if (lowBits == UNION_METHODTABLE)
{
// pointer to canonical MethodTable.
- TADDR canonicalMethodTable = union_getPointer(m_pCanonMT);
- return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
+ TADDR canonicalMethodTable = union_getPointer(addr);
+ return PTR_EEClass(ReadPointer((MethodTable *) PTR_MethodTable(canonicalMethodTable), &MethodTable::m_pCanonMT));
}
#ifdef FEATURE_PREJIT
else if (lowBits == UNION_INDIRECTION)
{
// pointer to indirection cell that points to canonical MethodTable
- TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(m_pCanonMT));
- return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
+ TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(addr));
+ return PTR_EEClass(ReadPointer((MethodTable *) PTR_MethodTable(canonicalMethodTable), &MethodTable::m_pCanonMT));
}
#endif
#ifdef DACCESS_COMPILE
@@ -52,8 +54,6 @@ inline PTR_EEClass MethodTable::GetClass_NoLogging()
#else
- TADDR addr = m_pCanonMT;
-
if ((addr & 2) == 0)
{
// pointer to EEClass
@@ -65,12 +65,12 @@ inline PTR_EEClass MethodTable::GetClass_NoLogging()
{
// pointer to indirection cell that points to canonical MethodTable
TADDR canonicalMethodTable = *PTR_TADDR(addr - 3);
- return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
+ return PTR_EEClass(ReadPointer((MethodTable *) PTR_MethodTable(canonicalMethodTable), &MethodTable::m_pCanonMT));
}
#endif
// pointer to canonical MethodTable.
- return PTR_EEClass(PTR_MethodTable(addr - 2)->m_pCanonMT);
+ return PTR_EEClass(ReadPointer((MethodTable *) PTR_MethodTable(addr - 2), &MethodTable::m_pCanonMT));
#endif
}
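
The release-build path of GetClass_NoLogging makes the tagging scheme concrete: (addr & 2) == 0 means the union holds an EEClass pointer, addr - 2 recovers a canonical MethodTable pointer, and addr - 3 recovers an indirection cell. A self-contained sketch of that low-two-bit tagging, with the tag values inferred from this function (treat them as an assumption, not the runtime's definitions):

    #include <cstdint>

    // Low-two-bit pointer tagging as implied by the decode logic above
    // (assumed values: 0 = EEClass, 2 = canonical MethodTable, 3 = indirection).
    enum LowBitsSketch : uintptr_t { EECLASS = 0, METHODTABLE = 2, INDIRECTION = 3 };

    const uintptr_t UNION_MASK_SKETCH = 3;

    uintptr_t Tag(const void* p, LowBitsSketch bits)
    {
        return reinterpret_cast<uintptr_t>(p) | bits;
    }

    LowBitsSketch GetLowBits(uintptr_t v)
    {
        return static_cast<LowBitsSketch>(v & UNION_MASK_SKETCH);
    }

    uintptr_t GetPointer(uintptr_t v)
    {
        return v & ~UNION_MASK_SKETCH;
    }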
@@ -113,25 +113,27 @@ inline BOOL MethodTable::IsClassPointerValid()
WRAPPER_NO_CONTRACT;
SUPPORTS_DAC;
- LowBits lowBits = union_getLowBits(m_pCanonMT);
+ TADDR addr = ReadPointer(this, &MethodTable::m_pCanonMT);
+
+ LowBits lowBits = union_getLowBits(addr);
if (lowBits == UNION_EECLASS)
{
- return (m_pEEClass != NULL);
+ return !m_pEEClass.IsNull();
}
else if (lowBits == UNION_METHODTABLE)
{
// pointer to canonical MethodTable.
- TADDR canonicalMethodTable = union_getPointer(m_pCanonMT);
- return (PTR_MethodTable(canonicalMethodTable)->m_pEEClass != NULL);
+ TADDR canonicalMethodTable = union_getPointer(addr);
+ return !PTR_MethodTable(canonicalMethodTable)->m_pEEClass.IsNull();
}
#ifdef FEATURE_PREJIT
else if (lowBits == UNION_INDIRECTION)
{
// pointer to indirection cell that points to canonical MethodTable
- TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(m_pCanonMT));
+ TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(addr));
if (CORCOMPILE_IS_POINTER_TAGGED(canonicalMethodTable))
return FALSE;
- return (PTR_MethodTable(canonicalMethodTable)->m_pEEClass != NULL);
+ return !PTR_MethodTable(canonicalMethodTable)->m_pEEClass.IsNull();
}
#endif
_ASSERTE(!"Malformed m_pEEClass in MethodTable");
@@ -161,7 +163,7 @@ inline PTR_Module MethodTable::GetZapModule()
PTR_Module zapModule = NULL;
if (IsZapped())
{
- zapModule = m_pLoaderModule;
+ zapModule = ReadPointer(this, &MethodTable::m_pLoaderModule);
}
return zapModule;
@@ -171,7 +173,7 @@ inline PTR_Module MethodTable::GetZapModule()
inline PTR_Module MethodTable::GetLoaderModule()
{
LIMITED_METHOD_DAC_CONTRACT;
- return m_pLoaderModule;
+ return ReadPointer(this, &MethodTable::m_pLoaderModule);
}
inline PTR_LoaderAllocator MethodTable::GetLoaderAllocator()
@@ -187,7 +189,7 @@ inline PTR_LoaderAllocator MethodTable::GetLoaderAllocator()
inline void MethodTable::SetLoaderModule(Module* pModule)
{
WRAPPER_NO_CONTRACT;
- m_pLoaderModule = pModule;
+ m_pLoaderModule.SetValue(pModule);
}
inline void MethodTable::SetLoaderAllocator(LoaderAllocator* pAllocator)
@@ -884,10 +886,10 @@ inline DWORD MethodTable::GetNumVtableIndirections(DWORD wNumVirtuals)
}
//==========================================================================================
-inline PTR_PTR_PCODE MethodTable::GetVtableIndirections()
+inline DPTR(MethodTable::VTableIndir_t) MethodTable::GetVtableIndirections()
{
LIMITED_METHOD_DAC_CONTRACT;
- return dac_cast<PTR_PTR_PCODE>(dac_cast<TADDR>(this) + sizeof(MethodTable));
+ return dac_cast<DPTR(VTableIndir_t)>(dac_cast<TADDR>(this) + sizeof(MethodTable));
}
//==========================================================================================
@@ -949,7 +951,7 @@ inline DWORD MethodTable::VtableIndirectionSlotIterator::GetOffsetFromMethodTabl
WRAPPER_NO_CONTRACT;
PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
- return GetVtableOffset() + sizeof(PTR_PCODE) * m_i;
+ return GetVtableOffset() + sizeof(VTableIndir_t) * m_i;
}
//==========================================================================================
@@ -958,7 +960,7 @@ inline PTR_PCODE MethodTable::VtableIndirectionSlotIterator::GetIndirectionSlot(
LIMITED_METHOD_DAC_CONTRACT;
PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
- return *m_pSlot;
+ return m_pSlot->GetValueMaybeNull(dac_cast<TADDR>(m_pSlot));
}
//==========================================================================================
@@ -966,7 +968,7 @@ inline PTR_PCODE MethodTable::VtableIndirectionSlotIterator::GetIndirectionSlot(
inline void MethodTable::VtableIndirectionSlotIterator::SetIndirectionSlot(PTR_PCODE pChunk)
{
LIMITED_METHOD_CONTRACT;
- *m_pSlot = pChunk;
+ m_pSlot->SetValueMaybeNull(pChunk);
}
#endif
@@ -1144,8 +1146,10 @@ inline PTR_MethodTable MethodTable::GetCanonicalMethodTable()
{
LIMITED_METHOD_DAC_CONTRACT;
+ TADDR addr = ReadPointer(this, &MethodTable::m_pCanonMT);
+
#ifdef _DEBUG
- LowBits lowBits = union_getLowBits(m_pCanonMT);
+ LowBits lowBits = union_getLowBits(addr);
if (lowBits == UNION_EECLASS)
{
return dac_cast<PTR_MethodTable>(this);
@@ -1153,18 +1157,17 @@ inline PTR_MethodTable MethodTable::GetCanonicalMethodTable()
else if (lowBits == UNION_METHODTABLE)
{
// pointer to canonical MethodTable.
- return PTR_MethodTable(union_getPointer(m_pCanonMT));
+ return PTR_MethodTable(union_getPointer(addr));
}
#ifdef FEATURE_PREJIT
else if (lowBits == UNION_INDIRECTION)
{
- return PTR_MethodTable(*PTR_TADDR(union_getPointer(m_pCanonMT)));
+ return PTR_MethodTable(*PTR_TADDR(union_getPointer(addr)));
}
#endif
_ASSERTE(!"Malformed m_pCanonMT in MethodTable");
return NULL;
#else
- TADDR addr = m_pCanonMT;
if ((addr & 2) == 0)
return dac_cast<PTR_MethodTable>(this);
@@ -1184,11 +1187,12 @@ inline TADDR MethodTable::GetCanonicalMethodTableFixup()
LIMITED_METHOD_DAC_CONTRACT;
#ifdef FEATURE_PREJIT
- LowBits lowBits = union_getLowBits(m_pCanonMT);
+ TADDR addr = ReadPointer(this, &MethodTable::m_pCanonMT);
+ LowBits lowBits = union_getLowBits(addr);
if (lowBits == UNION_INDIRECTION)
{
// pointer to canonical MethodTable.
- return *PTR_TADDR(union_getPointer(m_pCanonMT));
+ return *PTR_TADDR(union_getPointer(addr));
}
else
#endif
@@ -1251,7 +1255,7 @@ inline BOOL MethodTable::HasExplicitSize()
inline DWORD MethodTable::GetPerInstInfoSize()
{
LIMITED_METHOD_DAC_CONTRACT;
- return GetNumDicts() * sizeof(TypeHandle*);
+ return GetNumDicts() * sizeof(PerInstInfoElem_t);
}
//==========================================================================================
@@ -1303,7 +1307,7 @@ inline BOOL MethodTable::IsCanonicalMethodTable()
{
LIMITED_METHOD_DAC_CONTRACT;
- return (union_getLowBits(m_pCanonMT) == UNION_EECLASS);
+ return (union_getLowBits(ReadPointer(this, &MethodTable::m_pCanonMT)) == UNION_EECLASS);
}
//==========================================================================================
@@ -1337,7 +1341,7 @@ inline PTR_InterfaceInfo MethodTable::GetInterfaceMap()
{
LIMITED_METHOD_DAC_CONTRACT;
- return dac_cast<PTR_InterfaceInfo>(m_pMultipurposeSlot2); // m_pInterfaceMap
+ return ReadPointer(this, &MethodTable::m_pInterfaceMap);
}
//==========================================================================================
@@ -1350,7 +1354,7 @@ FORCEINLINE TADDR MethodTable::GetMultipurposeSlotPtr(WFLAGS2_ENUM flag, const B
DWORD offset = offsets[GetFlag((WFLAGS2_ENUM)(flag - 1))];
if (offset >= sizeof(MethodTable)) {
- offset += GetNumVtableIndirections() * sizeof(PTR_PCODE);
+ offset += GetNumVtableIndirections() * sizeof(VTableIndir_t);
}
return dac_cast<TADDR>(this) + offset;
@@ -1365,7 +1369,7 @@ FORCEINLINE DWORD MethodTable::GetOffsetOfOptionalMember(OptionalMemberId id)
DWORD offset = c_OptionalMembersStartOffsets[GetFlag(enum_flag_MultipurposeSlotsMask)];
- offset += GetNumVtableIndirections() * sizeof(PTR_PCODE);
+ offset += GetNumVtableIndirections() * sizeof(VTableIndir_t);
#undef METHODTABLE_OPTIONAL_MEMBER
#define METHODTABLE_OPTIONAL_MEMBER(NAME, TYPE, GETTER) \
@@ -1732,7 +1736,7 @@ FORCEINLINE PTR_Module MethodTable::GetGenericsStaticsModuleAndID(DWORD * pID)
_ASSERTE(!IsStringOrArray());
if (m_dwFlags & enum_flag_StaticsMask_IfGenericsThenCrossModule)
{
- CrossModuleGenericsStaticsInfo *pInfo = m_pWriteableData->GetCrossModuleGenericsStaticsInfo();
+ CrossModuleGenericsStaticsInfo *pInfo = ReadPointer(this, &MethodTable::m_pWriteableData)->GetCrossModuleGenericsStaticsInfo();
_ASSERTE(FitsIn<DWORD>(pInfo->m_DynamicTypeID) || pInfo->m_DynamicTypeID == (SIZE_T)-1);
*pID = static_cast<DWORD>(pInfo->m_DynamicTypeID);
return pInfo->m_pModuleForStatics;
diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
index 3a738df496..f9876608eb 100644
--- a/src/vm/methodtablebuilder.cpp
+++ b/src/vm/methodtablebuilder.cpp
@@ -21,7 +21,6 @@
#include "encee.h"
#include "mdaassistants.h"
#include "ecmakey.h"
-#include "security.h"
#include "customattribute.h"
#include "typestring.h"
@@ -1822,7 +1821,7 @@ MethodTableBuilder::BuildMethodTableThrowing(
GetHalfBakedClass()->SetIsNotTightlyPacked();
#ifdef FEATURE_HFA
- CheckForHFA(pByValueClassCache);
+ GetHalfBakedClass()->CheckForHFA(pByValueClassCache);
#endif
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
#ifdef FEATURE_HFA
@@ -1844,7 +1843,7 @@ MethodTableBuilder::BuildMethodTableThrowing(
#ifdef FEATURE_HFA
if (HasLayout())
{
- CheckForNativeHFA();
+ GetHalfBakedClass()->CheckForNativeHFA();
}
#endif
@@ -4168,19 +4167,6 @@ VOID MethodTableBuilder::InitializeFieldDescs(FieldDesc *pFieldDescList,
pszFieldName
);
- // Check if the ValueType field containing non-publics is overlapped
- if (HasExplicitFieldOffsetLayout()
- && pLayoutFieldInfo != NULL
- && pLayoutFieldInfo->m_fIsOverlapped
- && pByValueClass != NULL
- && pByValueClass->GetClass()->HasNonPublicFields())
- {
- if (!Security::CanSkipVerification(GetAssembly()->GetDomainAssembly()))
- {
- BuildMethodTableThrowException(IDS_CLASSLOAD_BADOVERLAP);
- }
- }
-
// We're using FieldDesc::m_pMTOfEnclosingClass to temporarily store the field's size.
//
if (fIsByValue)
@@ -4281,14 +4267,6 @@ VOID MethodTableBuilder::InitializeFieldDescs(FieldDesc *pFieldDescList,
BAD_FORMAT_NOTHROW_ASSERT(!"ObjectRef in an RVA field");
BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
}
- if (pByValueClass->GetClass()->HasNonPublicFields())
- {
- if (!Security::CanHaveRVA(GetAssembly()))
- {
- BAD_FORMAT_NOTHROW_ASSERT(!"ValueType with non-public fields as a type of an RVA field");
- BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
- }
- }
}
}
@@ -4321,14 +4299,6 @@ VOID MethodTableBuilder::InitializeFieldDescs(FieldDesc *pFieldDescList,
{
fldSize = GetSizeForCorElementType(FieldDescElementType);
}
- if (!GetModule()->CheckRvaField(rva, fldSize))
- {
- if (!Security::CanHaveRVA(GetAssembly()))
- {
- BAD_FORMAT_NOTHROW_ASSERT(!"Illegal RVA of a mapped field");
- BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
- }
- }
pFD->SetOffsetRVA(rva);
}
@@ -4368,14 +4338,6 @@ VOID MethodTableBuilder::InitializeFieldDescs(FieldDesc *pFieldDescList,
BAD_FORMAT_NOTHROW_ASSERT(!"ObjectRef in an RVA self-referencing static field");
BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
}
- if (HasNonPublicFields())
- { // RVA ValueTypes with non-public fields must be checked against security
- if (!Security::CanHaveRVA(GetAssembly()))
- {
- BAD_FORMAT_NOTHROW_ASSERT(!"ValueType with non-public fields as a type of an RVA self-referencing static field");
- BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
- }
- }
}
DWORD dwNumInstanceFields = dwCurrentDeclaredField + (HasParent() ? GetParentMethodTable()->GetNumInstanceFields() : 0);
@@ -4464,15 +4426,6 @@ MethodTableBuilder::VerifySelfReferencingStaticValueTypeFields_WithRVA(
{
DWORD rva;
IfFailThrow(GetMDImport()->GetFieldRVA(pFD->GetMemberDef(), &rva));
-
- if (!GetModule()->CheckRvaField(rva, bmtFP->NumInstanceFieldBytes))
- {
- if (!Security::CanHaveRVA(GetAssembly()))
- {
- BAD_FORMAT_NOTHROW_ASSERT(!"Illegal RVA of a mapped self-referencing static field");
- BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
- }
- }
}
}
}
@@ -5126,6 +5079,20 @@ MethodTableBuilder::InitNewMethodDesc(
pNewMD->SetNotInline(true);
}
+ // Check for methods marked as [Intrinsic]
+ if (GetModule()->IsSystem())
+ {
+ HRESULT hr = GetMDImport()->GetCustomAttributeByName(pMethod->GetMethodSignature().GetToken(),
+ g_CompilerServicesIntrinsicAttribute,
+ NULL,
+ NULL);
+
+ if (hr == S_OK)
+ {
+ pNewMD->SetIsJitIntrinsic();
+ }
+ }
+
pNewMD->SetSlot(pMethod->GetSlotIndex());
}
@@ -6937,6 +6904,12 @@ MethodTableBuilder::NeedsNativeCodeSlot(bmtMDMethod * pMDMethod)
}
#endif
+#if defined(FEATURE_JIT_PITCHING)
+ if ((CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchEnabled) != 0) &&
+ (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_JitPitchMemThreshold) != 0))
+ return TRUE;
+#endif
+
return GetModule()->IsEditAndContinueEnabled();
}
@@ -8216,188 +8189,6 @@ void MethodTableBuilder::StoreEightByteClassification(SystemVStructRegisterPassi
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
-#ifdef FEATURE_HFA
-//---------------------------------------------------------------------------------------
-//
-VOID
-MethodTableBuilder::CheckForHFA(MethodTable ** pByValueClassCache)
-{
- STANDARD_VM_CONTRACT;
-
- // This method should be called for valuetypes only
- _ASSERTE(IsValueClass());
-
- // No HFAs with explicit layout. There may be cases where explicit layout may be still
- // eligible for HFA, but it is hard to tell the real intent. Make it simple and just
- // unconditionally disable HFAs for explicit layout.
- if (HasExplicitFieldOffsetLayout())
- return;
-
- CorElementType hfaType = ELEMENT_TYPE_END;
-
- FieldDesc *pFieldDescList = GetHalfBakedClass()->GetFieldDescList();
- for (UINT i = 0; i < bmtEnumFields->dwNumInstanceFields; i++)
- {
- FieldDesc *pFD = &pFieldDescList[i];
- CorElementType fieldType = pFD->GetFieldType();
-
- switch (fieldType)
- {
- case ELEMENT_TYPE_VALUETYPE:
- fieldType = pByValueClassCache[i]->GetHFAType();
- break;
-
- case ELEMENT_TYPE_R4:
- case ELEMENT_TYPE_R8:
- break;
-
- default:
- // Not HFA
- return;
- }
-
- // Field type should be a valid HFA type.
- if (fieldType == ELEMENT_TYPE_END)
- {
- return;
- }
-
- // Initialize with a valid HFA type.
- if (hfaType == ELEMENT_TYPE_END)
- {
- hfaType = fieldType;
- }
- // All field types should be equal.
- else if (fieldType != hfaType)
- {
- return;
- }
- }
-
- if (hfaType == ELEMENT_TYPE_END)
- return;
-
- int elemSize = (hfaType == ELEMENT_TYPE_R8) ? sizeof(double) : sizeof(float);
-
- // Note that we check the total size, but do not perform any checks on number of fields:
- // - Type of fields can be HFA valuetype itself
- // - Managed C++ HFA valuetypes have just one <alignment member> of type float to signal that
- // the valuetype is HFA and explicitly specified size
-
- DWORD totalSize = bmtFP->NumInstanceFieldBytes;
-
- if (totalSize % elemSize != 0)
- return;
-
- // On ARM, HFAs can have a maximum of four fields regardless of whether those are float or double.
- if (totalSize / elemSize > 4)
- return;
-
- // All the above tests passed. It's HFA!
- GetHalfBakedMethodTable()->SetIsHFA();
-}
-
-//
-// The managed and unmanaged views of the types can differ for non-blitable types. This method
-// mirrors the HFA type computation for the unmanaged view.
-//
-void MethodTableBuilder::CheckForNativeHFA()
-{
- STANDARD_VM_CONTRACT;
-
- // No HFAs with inheritance
- if (!(IsValueClass() || (GetParentMethodTable() == g_pObjectClass)))
- return;
-
- // No HFAs with explicit layout. There may be cases where explicit layout may be still
- // eligible for HFA, but it is hard to tell the real intent. Make it simple and just
- // unconditionally disable HFAs for explicit layout.
- if (HasExplicitFieldOffsetLayout())
- return;
-
- const FieldMarshaler *pFieldMarshaler = GetLayoutInfo()->GetFieldMarshalers();
- UINT numReferenceFields = GetLayoutInfo()->GetNumCTMFields();
-
- CorElementType hfaType = ELEMENT_TYPE_END;
-
- while (numReferenceFields--)
- {
- CorElementType fieldType = ELEMENT_TYPE_END;
-
- switch (pFieldMarshaler->GetNStructFieldType())
- {
- case NFT_COPY4:
- case NFT_COPY8:
- fieldType = pFieldMarshaler->GetFieldDesc()->GetFieldType();
- if (fieldType != ELEMENT_TYPE_R4 && fieldType != ELEMENT_TYPE_R8)
- return;
- break;
-
- case NFT_NESTEDLAYOUTCLASS:
- fieldType = ((FieldMarshaler_NestedLayoutClass *)pFieldMarshaler)->GetMethodTable()->GetNativeHFAType();
- break;
-
- case NFT_NESTEDVALUECLASS:
- fieldType = ((FieldMarshaler_NestedValueClass *)pFieldMarshaler)->GetMethodTable()->GetNativeHFAType();
- break;
-
- case NFT_FIXEDARRAY:
- fieldType = ((FieldMarshaler_FixedArray *)pFieldMarshaler)->GetElementTypeHandle().GetMethodTable()->GetNativeHFAType();
- break;
-
- case NFT_DATE:
- fieldType = ELEMENT_TYPE_R8;
- break;
-
- default:
- // Not HFA
- return;
- }
-
- // Field type should be a valid HFA type.
- if (fieldType == ELEMENT_TYPE_END)
- {
- return;
- }
-
- // Initialize with a valid HFA type.
- if (hfaType == ELEMENT_TYPE_END)
- {
- hfaType = fieldType;
- }
- // All field types should be equal.
- else if (fieldType != hfaType)
- {
- return;
- }
-
- ((BYTE*&)pFieldMarshaler) += MAXFIELDMARSHALERSIZE;
- }
-
- if (hfaType == ELEMENT_TYPE_END)
- return;
-
- int elemSize = (hfaType == ELEMENT_TYPE_R8) ? sizeof(double) : sizeof(float);
-
- // Note that we check the total size, but do not perform any checks on number of fields:
- // - Type of fields can be HFA valuetype itself
- // - Managed C++ HFA valuetypes have just one <alignment member> of type float to signal that
- // the valuetype is HFA and explicitly specified size
-
- DWORD totalSize = GetHalfBakedClass()->GetNativeSize();
-
- if (totalSize % elemSize != 0)
- return;
-
- // On ARM, HFAs can have a maximum of four fields regardless of whether those are float or double.
- if (totalSize / elemSize > 4)
- return;
-
- // All the above tests passed. It's HFA!
- GetLayoutInfo()->SetNativeHFAType(hfaType);
-}
-#endif // FEATURE_HFA
-
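
Although both CheckFor*HFA implementations move out of MethodTableBuilder here (to EEClass, per the calls updated earlier in this patch), the rule they encode is simple: every field must contribute the same floating-point element type, the total size must be a multiple of the element size, and there can be at most four elements. A compact, hypothetical restatement of that classification:

    #include <cstddef>
    #include <vector>

    // Hedged sketch of the HFA rule removed above: same floating-point element
    // type throughout ('f' = float, 'd' = double), total size a multiple of the
    // element size, and at most four elements.
    bool IsHfaLike(const std::vector<char>& fieldTypes, size_t totalSize)
    {
        if (fieldTypes.empty())
            return false;
        char t = fieldTypes[0];
        for (char f : fieldTypes)
            if (f != t || (f != 'f' && f != 'd'))
                return false;
        size_t elemSize = (t == 'd') ? sizeof(double) : sizeof(float);
        return totalSize % elemSize == 0 && totalSize / elemSize <= 4;
    }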
//---------------------------------------------------------------------------------------
//
// make sure that no object fields are overlapped incorrectly and define the
@@ -8633,17 +8424,6 @@ MethodTableBuilder::HandleExplicitLayout(
IDS_CLASSLOAD_EXPLICIT_LAYOUT);
}
- if (!explicitClassTrust.IsVerifiable())
- {
- if (!Security::CanSkipVerification(GetAssembly()->GetDomainAssembly()))
- {
- ThrowFieldLayoutError(GetCl(),
- GetModule(),
- firstObjectOverlapOffset,
- IDS_CLASSLOAD_UNVERIFIABLE_FIELD_LAYOUT);
- }
- }
-
if (!explicitClassTrust.IsNonOverLayed())
{
SetHasOverLayedFields();
@@ -9006,7 +8786,7 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
//
// Non-canonical method tables either share everything or nothing so it is sufficient to check
// just the first indirection to detect sharing.
- if (pMT->GetVtableIndirections()[0] != pCanonMT->GetVtableIndirections()[0])
+ if (pMT->GetVtableIndirections()[0].GetValueMaybeNull() != pCanonMT->GetVtableIndirections()[0].GetValueMaybeNull())
{
for (DWORD i = 0; i < nParentVirtuals; i++)
{
@@ -9033,7 +8813,7 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
// We need to re-inherit this slot from the exact parent.
DWORD indirectionIndex = MethodTable::GetIndexOfVtableIndirection(i);
- if (pMT->GetVtableIndirections()[indirectionIndex] == pApproxParentMT->GetVtableIndirections()[indirectionIndex])
+ if (pMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() == pApproxParentMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
{
// The slot lives in a chunk shared from the approximate parent MT
// If so, we need to change to share the chunk from the exact parent MT
@@ -9044,7 +8824,7 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
_ASSERTE(MethodTable::CanShareVtableChunksFrom(pParentMT, pMT->GetLoaderModule()));
#endif
- pMT->GetVtableIndirections()[indirectionIndex] = pParentMT->GetVtableIndirections()[indirectionIndex];
+ pMT->GetVtableIndirections()[indirectionIndex].SetValueMaybeNull(pParentMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull());
i = MethodTable::GetEndSlotForVtableIndirection(indirectionIndex, nParentVirtuals) - 1;
continue;
@@ -9901,7 +9681,7 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
S_SIZE_T cbTotalSize = S_SIZE_T(dwGCSize) + S_SIZE_T(sizeof(MethodTable));
// vtable
- cbTotalSize += MethodTable::GetNumVtableIndirections(dwVirtuals) * sizeof(PTR_PCODE);
+ cbTotalSize += MethodTable::GetNumVtableIndirections(dwVirtuals) * sizeof(MethodTable::VTableIndir_t);
DWORD dwMultipurposeSlotsMask = 0;
@@ -9945,7 +9725,7 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
if (dwNumDicts != 0)
{
cbTotalSize += sizeof(GenericsDictInfo);
- cbTotalSize += S_SIZE_T(dwNumDicts) * S_SIZE_T(sizeof(TypeHandle*));
+ cbTotalSize += S_SIZE_T(dwNumDicts) * S_SIZE_T(sizeof(MethodTable::PerInstInfoElem_t));
cbTotalSize += cbInstAndDict;
}
@@ -10050,7 +9830,7 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
{
// Share the parent chunk
_ASSERTE(it.GetEndSlot() <= pMTParent->GetNumVirtuals());
- it.SetIndirectionSlot(pMTParent->GetVtableIndirections()[it.GetIndex()]);
+ it.SetIndirectionSlot(pMTParent->GetVtableIndirections()[it.GetIndex()].GetValueMaybeNull());
}
else
{
@@ -10098,19 +9878,20 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
// the dictionary pointers follow the interface map
if (dwNumDicts)
{
- Dictionary** pPerInstInfo = (Dictionary**)(pData + offsetOfInstAndDict.Value() + sizeof(GenericsDictInfo));
+ MethodTable::PerInstInfoElem_t *pPerInstInfo = (MethodTable::PerInstInfoElem_t *)(pData + offsetOfInstAndDict.Value() + sizeof(GenericsDictInfo));
pMT->SetPerInstInfo ( pPerInstInfo);
// Fill in the dictionary for this type, if it's instantiated
if (cbInstAndDict)
{
- *(pPerInstInfo + (dwNumDicts-1)) = (Dictionary*) (pPerInstInfo + dwNumDicts);
+ MethodTable::PerInstInfoElem_t *pPInstInfo = (MethodTable::PerInstInfoElem_t *)(pPerInstInfo + (dwNumDicts-1));
+ pPInstInfo->SetValueMaybeNull((Dictionary*) (pPerInstInfo + dwNumDicts));
}
}
#ifdef _DEBUG
- pMT->m_pWriteableData->m_dwLastVerifedGCCnt = (DWORD)-1;
+ pMT->m_pWriteableData.GetValue()->m_dwLastVerifedGCCnt = (DWORD)-1;
#endif // _DEBUG
RETURN(pMT);
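
The dictionary wiring above is easier to see with plain pointers: the method table carries dwNumDicts pointer-sized slots after the GenericsDictInfo, and for an instantiated type the last slot is pointed at the dictionary data laid out immediately after the slot array. A minimal sketch of that layout step, ignoring the relative-pointer encoding (hypothetical helper, not runtime code):

    #include <cstddef>

    // pPerInstInfo points at dwNumDicts pointer slots; when the type carries
    // an inline instantiation/dictionary (cbInstAndDict != 0), aim the last
    // slot just past the slot array, where the dictionary data begins.
    void WirePerInstInfo(void** pPerInstInfo, size_t dwNumDicts, size_t cbInstAndDict)
    {
        if (cbInstAndDict != 0)
        {
            pPerInstInfo[dwNumDicts - 1] = pPerInstInfo + dwNumDicts;
        }
    }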
@@ -10599,7 +10380,7 @@ MethodTableBuilder::SetupMethodTable2(
// with code:MethodDesc::SetStableEntryPointInterlocked.
//
DWORD indirectionIndex = MethodTable::GetIndexOfVtableIndirection(iCurSlot);
- if (GetParentMethodTable()->GetVtableIndirections()[indirectionIndex] != pMT->GetVtableIndirections()[indirectionIndex])
+ if (GetParentMethodTable()->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() != pMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
pMT->SetSlot(iCurSlot, pMD->GetMethodEntryPoint());
}
else
diff --git a/src/vm/methodtablebuilder.h b/src/vm/methodtablebuilder.h
index 8c838d37e6..fcb10a86fc 100644
--- a/src/vm/methodtablebuilder.h
+++ b/src/vm/methodtablebuilder.h
@@ -223,7 +223,6 @@ private:
BOOL IsDelegate() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsDelegate(); }
BOOL IsNested() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsNested(); }
BOOL HasFieldsWhichMustBeInited() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasFieldsWhichMustBeInited(); }
- BOOL HasRemotingProxyAttribute() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasRemotingProxyAttribute(); }
BOOL IsBlittable() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->IsBlittable(); }
PTR_MethodDescChunk GetChunks() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->GetChunks(); }
BOOL HasExplicitFieldOffsetLayout() { WRAPPER_NO_CONTRACT; return GetHalfBakedClass()->HasExplicitFieldOffsetLayout(); }
@@ -258,7 +257,6 @@ private:
void SetNumBoxedRegularStatics(WORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNumBoxedRegularStatics(x); }
void SetNumBoxedThreadStatics(WORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNumBoxedThreadStatics(x); }
void SetAlign8Candidate() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetAlign8Candidate(); }
- void SetHasRemotingProxyAttribute() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetHasRemotingProxyAttribute(); }
void SetHasOverLayedFields() { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetHasOverLayedFields(); }
void SetNonGCRegularStaticFieldBytes(DWORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNonGCRegularStaticFieldBytes(x); }
void SetNonGCThreadStaticFieldBytes(DWORD x) { WRAPPER_NO_CONTRACT; GetHalfBakedClass()->SetNonGCThreadStaticFieldBytes(x); }
diff --git a/src/vm/mngstdinterfaces.cpp b/src/vm/mngstdinterfaces.cpp
index 5aafe8a97c..d6de4f32a0 100644
--- a/src/vm/mngstdinterfaces.cpp
+++ b/src/vm/mngstdinterfaces.cpp
@@ -22,7 +22,6 @@
#include "method.hpp"
#include "runtimecallablewrapper.h"
#include "excep.h"
-#include "security.h"
#include "typeparse.h"
//
@@ -217,9 +216,6 @@ LPVOID MngStdItfBase::ForwardCallToManagedView(
// The target isn't a TP so it better be a COM object.
_ASSERTE(Lr.Obj->GetMethodTable()->IsComObjectType());
- // We are about to call out to ummanaged code so we need to make a security check.
- Security::SpecialDemand(SSWT_DEMAND_FROM_NATIVE, SECURITY_UNMANAGED_CODE);
-
{
RCWHolder pRCW(GetThread());
RCWPROTECT_BEGIN(pRCW, Lr.Obj);
diff --git a/src/vm/mscorlib.h b/src/vm/mscorlib.h
index a9574213af..45462b33dd 100644
--- a/src/vm/mscorlib.h
+++ b/src/vm/mscorlib.h
@@ -113,11 +113,6 @@ DEFINE_CLASS_U(System, AppDomainSetup, AppDomainSetupObjec
DEFINE_FIELD_U(_Entries, AppDomainSetupObject, m_Entries)
DEFINE_FIELD_U(_AppBase, AppDomainSetupObject, m_AppBase)
DEFINE_FIELD_U(_CompatFlags, AppDomainSetupObject, m_CompatFlags)
-DEFINE_FIELD_U(_TargetFrameworkName, AppDomainSetupObject, m_TargetFrameworkName)
-DEFINE_FIELD_U(_CheckedForTargetFrameworkName, AppDomainSetupObject, m_CheckedForTargetFrameworkName)
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
-DEFINE_FIELD_U(_UseRandomizedStringHashing, AppDomainSetupObject, m_UseRandomizedStringHashing)
-#endif
DEFINE_CLASS(ARG_ITERATOR, System, ArgIterator)
DEFINE_CLASS_U(System, ArgIterator, VARARGS) // Includes a SigPointer.
@@ -165,7 +160,9 @@ DEFINE_FIELD_U(_Minor, VersionBaseObject, m_Minor)
DEFINE_FIELD_U(_Build, VersionBaseObject, m_Build)
DEFINE_FIELD_U(_Revision, VersionBaseObject, m_Revision)
DEFINE_CLASS(VERSION, System, Version)
-DEFINE_METHOD(VERSION, CTOR, .ctor, IM_Int_Int_Int_Int_RetVoid)
+DEFINE_METHOD(VERSION, CTOR_Ix2, .ctor, IM_Int_Int_RetVoid)
+DEFINE_METHOD(VERSION, CTOR_Ix3, .ctor, IM_Int_Int_Int_RetVoid)
+DEFINE_METHOD(VERSION, CTOR_Ix4, .ctor, IM_Int_Int_Int_Int_RetVoid)
DEFINE_CLASS(ASSEMBLY_VERSION_COMPATIBILITY, Assemblies, AssemblyVersionCompatibility)
@@ -609,12 +606,7 @@ DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_CLOSED, CtorClosed,
DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_CLOSED_STATIC, CtorClosedStatic, IM_Obj_IntPtr_RetVoid)
DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_RT_CLOSED, CtorRTClosed, IM_Obj_IntPtr_RetVoid)
DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_OPENED, CtorOpened, IM_Obj_IntPtr_IntPtr_RetVoid)
-DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_CLOSED, CtorSecureClosed, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
-DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_CLOSED_STATIC,CtorSecureClosedStatic, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
-DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_RT_CLOSED, CtorSecureRTClosed, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
-DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_OPENED, CtorSecureOpened, IM_Obj_IntPtr_IntPtr_IntPtr_IntPtr_RetVoid)
DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_VIRTUAL_DISPATCH, CtorVirtualDispatch, IM_Obj_IntPtr_IntPtr_RetVoid)
-DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_SECURE_VIRTUAL_DISPATCH, CtorSecureVirtualDispatch, IM_Obj_IntPtr_IntPtr_IntPtr_IntPtr_RetVoid)
DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_COLLECTIBLE_CLOSED_STATIC, CtorCollectibleClosedStatic, IM_Obj_IntPtr_IntPtr_RetVoid)
DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_COLLECTIBLE_OPENED, CtorCollectibleOpened, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
DEFINE_METHOD(MULTICAST_DELEGATE, CTOR_COLLECTIBLE_VIRTUAL_DISPATCH, CtorCollectibleVirtualDispatch, IM_Obj_IntPtr_IntPtr_IntPtr_RetVoid)
@@ -785,8 +777,10 @@ DEFINE_METHOD(JIT_HELPERS, GET_RAW_SZ_ARRAY_DATA, GetRawSzArrayData, N
DEFINE_CLASS(UNSAFE, CompilerServices, Unsafe)
DEFINE_METHOD(UNSAFE, AS_POINTER, AsPointer, NoSig)
DEFINE_METHOD(UNSAFE, SIZEOF, SizeOf, NoSig)
-DEFINE_METHOD(UNSAFE, BYREF_AS, As, NoSig)
-DEFINE_METHOD(UNSAFE, BYREF_ADD, Add, NoSig)
+DEFINE_METHOD(UNSAFE, BYREF_AS, As, GM_RefTFrom_RetRefTTo)
+DEFINE_METHOD(UNSAFE, OBJECT_AS, As, GM_Obj_RetT)
+DEFINE_METHOD(UNSAFE, BYREF_ADD, Add, GM_RefT_Int_RetRefT)
+DEFINE_METHOD(UNSAFE, PTR_ADD, Add, GM_PtrVoid_Int_RetPtrVoid)
DEFINE_METHOD(UNSAFE, BYREF_ADD_BYTE_OFFSET, AddByteOffset, NoSig)
DEFINE_METHOD(UNSAFE, BYREF_ARE_SAME, AreSame, NoSig)
DEFINE_METHOD(UNSAFE, BYREF_INIT_BLOCK_UNALIGNED, InitBlockUnaligned, NoSig)
@@ -807,7 +801,7 @@ DEFINE_FIELD(ARRAY_PINNING_HELPER, M_ARRAY_DATA, m_arrayData)
DEFINE_CLASS(RUNTIME_WRAPPED_EXCEPTION, CompilerServices, RuntimeWrappedException)
DEFINE_METHOD(RUNTIME_WRAPPED_EXCEPTION, OBJ_CTOR, .ctor, IM_Obj_RetVoid)
-DEFINE_FIELD(RUNTIME_WRAPPED_EXCEPTION, WRAPPED_EXCEPTION, m_wrappedException)
+DEFINE_FIELD(RUNTIME_WRAPPED_EXCEPTION, WRAPPED_EXCEPTION, _wrappedException)
DEFINE_CLASS_U(Interop, SafeHandle, SafeHandle)
DEFINE_FIELD_U(handle, SafeHandle, m_handle)
@@ -881,6 +875,7 @@ DEFINE_METHOD(STRING, CTORF_CHARARRAY_START_LEN,CtorCharArrayStart
DEFINE_METHOD(STRING, CTORF_CHAR_COUNT, CtorCharCount, IM_Char_Int_RetStr)
DEFINE_METHOD(STRING, CTORF_CHARPTR, CtorCharPtr, IM_PtrChar_RetStr)
DEFINE_METHOD(STRING, CTORF_CHARPTR_START_LEN,CtorCharPtrStartLength, IM_PtrChar_Int_Int_RetStr)
+DEFINE_METHOD(STRING, CTORF_READONLYSPANOFCHAR,CtorReadOnlySpanOfChar, IM_ReadOnlySpanOfChar_RetStr)
DEFINE_METHOD(STRING, INTERNAL_COPY, InternalCopy, SM_Str_IntPtr_Int_RetVoid)
DEFINE_METHOD(STRING, WCSLEN, wcslen, SM_PtrChar_RetInt)
DEFINE_PROPERTY(STRING, LENGTH, Length, Int)
@@ -971,6 +966,8 @@ DEFINE_CLASS(UNKNOWN_WRAPPER, Interop, UnknownWrapper)
#endif
DEFINE_CLASS(VALUE_TYPE, System, ValueType)
+DEFINE_METHOD(VALUE_TYPE, GET_HASH_CODE, GetHashCode, IM_RetInt)
+DEFINE_METHOD(VALUE_TYPE, EQUALS, Equals, IM_Obj_RetBool)
#ifdef FEATURE_COMINTEROP
DEFINE_CLASS(VARIANT_WRAPPER, Interop, VariantWrapper)
diff --git a/src/vm/multicorejit.cpp b/src/vm/multicorejit.cpp
index 4ad5447950..d35c3f7d9a 100644
--- a/src/vm/multicorejit.cpp
+++ b/src/vm/multicorejit.cpp
@@ -12,12 +12,10 @@
#include "common.h"
#include "vars.hpp"
-#include "security.h"
#include "eeconfig.h"
#include "dllimport.h"
#include "comdelegate.h"
#include "dbginterface.h"
-#include "listlock.inl"
#include "stubgen.h"
#include "eventtrace.h"
#include "array.h"
@@ -993,7 +991,7 @@ PCODE MulticoreJitRecorder::RequestMethodCode(MethodDesc * pMethod, MulticoreJit
PCODE pCode = NULL;
- pCode = pManager->GetMulticoreJitCodeStorage().QueryMethodCode(pMethod);
+ pCode = pManager->GetMulticoreJitCodeStorage().QueryMethodCode(pMethod, TRUE);
if ((pCode != NULL) && pManager->IsRecorderActive()) // recorder may be off when player is on (e.g. for Appx)
{
diff --git a/src/vm/multicorejit.h b/src/vm/multicorejit.h
index b7a0951ee1..047ba01a5f 100644
--- a/src/vm/multicorejit.h
+++ b/src/vm/multicorejit.h
@@ -103,7 +103,7 @@ public:
void StoreMethodCode(MethodDesc * pMethod, PCODE pCode);
- PCODE QueryMethodCode(MethodDesc * pMethod);
+ PCODE QueryMethodCode(MethodDesc * pMethod, BOOL shouldRemoveCode);
inline unsigned GetRemainingMethodCount() const
{
diff --git a/src/vm/multicorejitplayer.cpp b/src/vm/multicorejitplayer.cpp
index d7c2cec8a1..247fa0a14a 100644
--- a/src/vm/multicorejitplayer.cpp
+++ b/src/vm/multicorejitplayer.cpp
@@ -12,12 +12,10 @@
#include "common.h"
#include "vars.hpp"
-#include "security.h"
#include "eeconfig.h"
#include "dllimport.h"
#include "comdelegate.h"
#include "dbginterface.h"
-#include "listlock.inl"
#include "stubgen.h"
#include "eventtrace.h"
#include "array.h"
@@ -103,7 +101,7 @@ void MulticoreJitCodeStorage::StoreMethodCode(MethodDesc * pMD, PCODE pCode)
// Query from MakeJitWorker: Lookup stored JITted methods
-PCODE MulticoreJitCodeStorage::QueryMethodCode(MethodDesc * pMethod)
+PCODE MulticoreJitCodeStorage::QueryMethodCode(MethodDesc * pMethod, BOOL shouldRemoveCode)
{
STANDARD_VM_CONTRACT;
@@ -113,7 +111,7 @@ PCODE MulticoreJitCodeStorage::QueryMethodCode(MethodDesc * pMethod)
{
CrstHolder holder(& m_crstCodeMap);
- if (m_nativeCodeMap.Lookup(pMethod, & code))
+ if (m_nativeCodeMap.Lookup(pMethod, & code) && shouldRemoveCode)
{
m_nReturned ++;
@@ -507,6 +505,23 @@ HRESULT MulticoreJitProfilePlayer::HandleModuleRecord(const ModuleRecord * pMod)
}
+#ifndef DACCESS_COMPILE
+class MulticoreJitPrepareCodeConfig : public PrepareCodeConfig
+{
+public:
+ MulticoreJitPrepareCodeConfig(MethodDesc* pMethod) :
+ PrepareCodeConfig(NativeCodeVersion(pMethod), FALSE, FALSE)
+ {}
+
+ virtual BOOL SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse)
+ {
+ MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
+ mcJitManager.GetMulticoreJitCodeStorage().StoreMethodCode(GetMethodDesc(), pCode);
+ return TRUE;
+ }
+};
+#endif
+
// Call JIT to compile a method
bool MulticoreJitProfilePlayer::CompileMethodDesc(Module * pModule, MethodDesc * pMD)
@@ -529,8 +544,9 @@ bool MulticoreJitProfilePlayer::CompileMethodDesc(Module * pModule, MethodDesc *
// Reset the flag to allow managed code to be called in multicore JIT background thread from this routine
ThreadStateNCStackHolder holder(-1, Thread::TSNC_CallingManagedCodeDisabled);
- // MakeJitWorker calls back to MulticoreJitCodeStorage::StoreMethodCode under MethodDesc lock
- pMD->MakeJitWorker(& header, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND));
+ // PrepareCode calls back to MulticoreJitCodeStorage::StoreMethodCode under MethodDesc lock
+ MulticoreJitPrepareCodeConfig config(pMD);
+ pMD->PrepareCode(&config);
return true;
}
diff --git a/src/vm/object.h b/src/vm/object.h
index add704a2f7..20d7d50961 100644
--- a/src/vm/object.h
+++ b/src/vm/object.h
@@ -1898,12 +1898,6 @@ class AppDomainSetupObject : public Object
PTRARRAYREF m_Entries;
STRINGREF m_AppBase;
OBJECTREF m_CompatFlags;
- STRINGREF m_TargetFrameworkName;
- CLR_BOOL m_CheckedForTargetFrameworkName;
-#ifdef FEATURE_RANDOMIZED_STRING_HASHING
- CLR_BOOL m_UseRandomizedStringHashing;
-#endif
-
protected:
AppDomainSetupObject() { LIMITED_METHOD_CONTRACT; }
diff --git a/src/vm/olevariant.cpp b/src/vm/olevariant.cpp
index a15a1979e2..1ec6f70213 100644
--- a/src/vm/olevariant.cpp
+++ b/src/vm/olevariant.cpp
@@ -14,7 +14,6 @@
#include "excep.h"
#include "frames.h"
#include "vars.hpp"
-#include "security.h"
#include "olevariant.h"
#include "comdatetime.h"
#include "fieldmarshaler.h"
@@ -2563,12 +2562,6 @@ void OleVariant::MarshalRecordVariantOleToCom(VARIANT *pOleVariant,
if (!pValueClass)
COMPlusThrow(kArgumentException, IDS_EE_CANNOT_MAP_TO_MANAGED_VC);
- Module* pModule = pValueClass->GetModule();
- if (!Security::CanCallUnmanagedCode(pModule))
- {
- COMPlusThrow(kArgumentException, IDS_EE_VTRECORD_SECURITY);
- }
-
// Now that we have the value class, allocate an instance of the
// boxed value class and copy the contents of the record into it.
BoxedValueClass = AllocateObject(pValueClass);
@@ -2597,12 +2590,6 @@ void OleVariant::MarshalRecordVariantComToOle(VariantData *pComVariant,
GCPROTECT_BEGIN(BoxedValueClass)
{
_ASSERTE(BoxedValueClass != NULL);
- Module* pModule = BoxedValueClass->GetMethodTable()->GetModule();
- if (!Security::CanCallUnmanagedCode(pModule))
- {
- COMPlusThrow(kArgumentException, IDS_EE_VTRECORD_SECURITY);
- }
-
ConvertValueClassToVariant(&BoxedValueClass, pOleVariant);
}
GCPROTECT_END();
@@ -2633,12 +2620,6 @@ void OleVariant::MarshalRecordArrayOleToCom(void *oleArray, BASEARRAYREF *pComAr
}
CONTRACTL_END;
- Module* pModule = pElementMT->GetModule();
- if (!Security::CanCallUnmanagedCode(pModule))
- {
- COMPlusThrow(kArgumentException, IDS_EE_VTRECORD_SECURITY);
- }
-
if (pElementMT->IsBlittable())
{
// The array is blittable so we can simply copy it.
@@ -2671,12 +2652,6 @@ void OleVariant::MarshalRecordArrayComToOle(BASEARRAYREF *pComArray, void *oleAr
}
CONTRACTL_END;
- Module* pModule = pElementMT->GetModule();
- if (!Security::CanCallUnmanagedCode(pModule))
- {
- COMPlusThrow(kArgumentException, IDS_EE_VTRECORD_SECURITY);
- }
-
if (pElementMT->IsBlittable())
{
// The array is blittable so we can simply copy it.
diff --git a/src/vm/pefile.cpp b/src/vm/pefile.cpp
index 5d83ee97cb..306a52269c 100644
--- a/src/vm/pefile.cpp
+++ b/src/vm/pefile.cpp
@@ -17,7 +17,6 @@
#include "eeconfig.h"
#include "product_version.h"
#include "eventtrace.h"
-#include "security.h"
#include "corperm.h"
#include "dbginterface.h"
#include "peimagelayout.inl"
diff --git a/src/vm/perfmap.cpp b/src/vm/perfmap.cpp
index b664b72d7a..e6643fe772 100644
--- a/src/vm/perfmap.cpp
+++ b/src/vm/perfmap.cpp
@@ -27,6 +27,13 @@ void PerfMap::Initialize()
// Create the map.
s_Current = new PerfMap(currentPid);
+
+ int signalNum = (int) CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_PerfMapIgnoreSignal);
+
+ if (signalNum > 0)
+ {
+ PAL_IgnoreProfileSignal(signalNum);
+ }
}
}
@@ -50,6 +57,8 @@ PerfMap::PerfMap(int pid)
// Initialize with no failures.
m_ErrorEncountered = false;
+ m_StubsMapped = 0;
+
// Build the path to the map file on disk.
WCHAR tempPath[MAX_LONGPATH+1];
if(!GetTempPathW(MAX_LONGPATH, tempPath))
@@ -76,6 +85,8 @@ PerfMap::PerfMap()
// Initialize with no failures.
m_ErrorEncountered = false;
+
+ m_StubsMapped = 0;
}
// Clean-up resources.
@@ -218,6 +229,38 @@ void PerfMap::LogJITCompiledMethod(MethodDesc * pMethod, PCODE pCode, size_t cod
}
}
+// Log a set of stubs to the map.
+void PerfMap::LogStubs(const char* stubType, const char* stubOwner, PCODE pCode, size_t codeSize)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ if (s_Current == nullptr || s_Current->m_FileStream == nullptr)
+ {
+ return;
+ }
+
+ // Logging failures should not cause any exceptions to flow upstream.
+ EX_TRY
+ {
+ if(!stubOwner)
+ {
+ stubOwner = "?";
+ }
+ if(!stubType)
+ {
+ stubType = "?";
+ }
+
+ // Build the map file line.
+ SString line;
+ line.Printf("%p %x stub<%d> %s<%s>\n", pCode, codeSize, ++(s_Current->m_StubsMapped), stubType, stubOwner);
+
+ // Write the line.
+ s_Current->WriteLine(line);
+ }
+ EX_CATCH{} EX_END_CATCH(SwallowAllExceptions);
+}
+
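
For reference, given the Printf format above, a stub record in the map would look roughly like the following (hypothetical address and size; the size is written in hex, and the caller in precode.cpp below passes __FUNCTION__ as stubType and "PRECODE_FIXUP" as stubOwner):

    7f6b4c021000 140 stub<3> AllocateTemporaryEntryPoints<PRECODE_FIXUP>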
void PerfMap::GetNativeImageSignature(PEFile * pFile, WCHAR * pwszSig, unsigned int nSigSize)
{
CONTRACTL{
diff --git a/src/vm/perfmap.h b/src/vm/perfmap.h
index fe38ed3ad5..1f06bd4091 100644
--- a/src/vm/perfmap.h
+++ b/src/vm/perfmap.h
@@ -28,6 +28,9 @@ private:
// Set to true if an error is encountered when writing to the file.
bool m_ErrorEncountered;
+ // Number of stubs logged to the map so far.
+ unsigned m_StubsMapped;
+
// Construct a new map for the specified pid.
PerfMap(int pid);
@@ -64,6 +67,9 @@ public:
// Log a JIT compiled method to the map.
static void LogJITCompiledMethod(MethodDesc * pMethod, PCODE pCode, size_t codeSize);
+ // Log a set of stubs to the map.
+ static void LogStubs(const char* stubType, const char* stubOwner, PCODE pCode, size_t codeSize);
+
// Close the map and flush any remaining data.
static void Destroy();
};
diff --git a/src/vm/precode.cpp b/src/vm/precode.cpp
index 1daf6e32b8..8891d5a903 100644
--- a/src/vm/precode.cpp
+++ b/src/vm/precode.cpp
@@ -15,6 +15,10 @@
#include "compile.h"
#endif
+#ifdef FEATURE_PERFMAP
+#include "perfmap.h"
+#endif
+
//==========================================================================================
// class Precode
//==========================================================================================
@@ -556,6 +560,9 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
pMD = (MethodDesc *)(dac_cast<TADDR>(pMD) + pMD->SizeOf());
}
+#ifdef FEATURE_PERFMAP
+ PerfMap::LogStubs(__FUNCTION__, "PRECODE_FIXUP", (PCODE)temporaryEntryPoints, count * sizeof(FixupPrecode));
+#endif
ClrFlushInstructionCache((LPVOID)temporaryEntryPoints, count * sizeof(FixupPrecode));
return temporaryEntryPoints;
@@ -575,6 +582,10 @@ TADDR Precode::AllocateTemporaryEntryPoints(MethodDescChunk * pChunk,
pMD = (MethodDesc *)(dac_cast<TADDR>(pMD) + pMD->SizeOf());
}
+#ifdef FEATURE_PERFMAP
+ PerfMap::LogStubs(__FUNCTION__, "PRECODE_STUB", (PCODE)temporaryEntryPoints, count * oneSize);
+#endif
+
ClrFlushInstructionCache((LPVOID)temporaryEntryPoints, count * oneSize);
return temporaryEntryPoints;
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 31a5670e00..f96135008d 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -13,12 +13,10 @@
#include "common.h"
#include "vars.hpp"
-#include "security.h"
#include "eeconfig.h"
#include "dllimport.h"
#include "comdelegate.h"
#include "dbginterface.h"
-#include "listlock.inl"
#include "stubgen.h"
#include "eventtrace.h"
#include "array.h"
@@ -52,15 +50,18 @@
#include "callcounter.h"
#endif
-#ifndef DACCESS_COMPILE
-
-EXTERN_C void STDCALL ThePreStub();
-
-#if defined(HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_)
+#if defined(FEATURE_GDBJIT)
+#include "gdbjit.h"
+#endif // FEATURE_GDBJIT
-EXTERN_C void STDCALL ThePreStubCompactARM();
+#ifndef DACCESS_COMPILE
-#endif // defined(HAS_COMPACT_ENTRYPOINTS) && defined (_TARGET_ARM_)
+#if defined(FEATURE_JIT_PITCHING)
+EXTERN_C void CheckStacksAndPitch();
+EXTERN_C void SavePitchingCandidate(MethodDesc* pMD, ULONG sizeOfCode);
+EXTERN_C void DeleteFromPitchingCandidate(MethodDesc* pMD);
+EXTERN_C void MarkMethodNotPitchingCandidate(MethodDesc* pMD);
+#endif
EXTERN_C void STDCALL ThePreStubPatch();
@@ -72,17 +73,11 @@ PCODE MethodDesc::DoBackpatch(MethodTable * pMT, MethodTable *pDispatchingMT, BO
{
STANDARD_VM_CHECK;
PRECONDITION(!ContainsGenericVariables());
-#ifndef FEATURE_INTERPRETER
PRECONDITION(HasStableEntryPoint());
-#endif // FEATURE_INTERPRETER
PRECONDITION(pMT == GetMethodTable());
}
CONTRACTL_END;
-#ifdef FEATURE_INTERPRETER
- PCODE pTarget = GetMethodEntryPoint();
-#else
PCODE pTarget = GetStableEntryPoint();
-#endif
if (!HasTemporaryEntryPoint())
return pTarget;
@@ -231,17 +226,13 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc)
_ASSERTE(modulePtr);
-#ifndef FEATURE_GDBJIT
// Are we listed?
USHORT jnt = jn.Requested((TADDR) modulePtr, t);
if (jnt & CLRDATA_METHNOTIFY_GENERATED)
{
// If so, throw an exception!
-#endif
DACNotify::DoJITNotification(methodDesc);
-#ifndef FEATURE_GDBJIT
}
-#endif
}
}
@@ -250,90 +241,350 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc)
#endif
// </TODO>
+PCODE MethodDesc::PrepareInitialCode()
+{
+ STANDARD_VM_CONTRACT;
+ PrepareCodeConfig config(NativeCodeVersion(this), TRUE, TRUE);
+ PCODE pCode = PrepareCode(&config);
-// ********************************************************************
-// README!!
-// ********************************************************************
+#if defined(FEATURE_GDBJIT) && defined(FEATURE_PAL) && !defined(CROSSGEN_COMPILE)
+ NotifyGdb::MethodPrepared(this);
+#endif
-// MakeJitWorker is the thread safe way to invoke the JIT compiler
-// If multiple threads get in here for the same pMD, ALL of them
-// MUST return the SAME value for pstub.
-//
-// This function creates a DeadlockAware list of methods being jitted
-// which prevents us from trying to JIT the same method more that once.
+ return pCode;
+}
+PCODE MethodDesc::PrepareCode(NativeCodeVersion codeVersion)
+{
+ STANDARD_VM_CONTRACT;
+
+#ifdef FEATURE_CODE_VERSIONING
+ if (codeVersion.IsDefaultVersion())
+ {
+#endif
+ // fast path
+ PrepareCodeConfig config(codeVersion, TRUE, TRUE);
+ return PrepareCode(&config);
+#ifdef FEATURE_CODE_VERSIONING
+ }
+ else
+ {
+ // a bit slower path (+1 usec?)
+ VersionedPrepareCodeConfig config;
+ {
+ CodeVersionManager::TableLockHolder lock(GetCodeVersionManager());
+ config = VersionedPrepareCodeConfig(codeVersion);
+ }
+ config.FinishConfiguration();
+ return PrepareCode(&config);
+ }
+#endif
+
+}
-PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags)
+PCODE MethodDesc::PrepareCode(PrepareCodeConfig* pConfig)
{
STANDARD_VM_CONTRACT;
- BOOL fIsILStub = IsILStub(); // @TODO: understand the need for this special case
+ // If other kinds of code need multi-versioning we could add more cases here,
+ // but for now generation of all other code/stubs occurs in other code paths
+ _ASSERTE(IsIL() || IsNoMetadata());
+ return PrepareILBasedCode(pConfig);
+}
- LOG((LF_JIT, LL_INFO1000000,
- "MakeJitWorker(" FMT_ADDR ", %s) for %s:%s\n",
- DBG_ADDR(this),
- fIsILStub ? " TRUE" : "FALSE",
- GetMethodTable()->GetDebugClassName(),
- m_pszDebugMethodName));
+PCODE MethodDesc::PrepareILBasedCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
+ PCODE pCode = NULL;
+
+ if (pConfig->MayUsePrecompiledCode())
+ {
+ pCode = GetPrecompiledCode(pConfig);
+ }
+ if (pCode == NULL)
+ {
+ LOG((LF_CLASSLOADER, LL_INFO1000000,
+ " In PrepareILBasedCode, calling JitCompileCode\n"));
+ // Mark the code as hot in case the method ends up in the native image
+ g_IBCLogger.LogMethodCodeAccess(this);
+ pCode = JitCompileCode(pConfig);
+ }
+ return pCode;
+}
+
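
With this refactoring the old MakeJitWorker entry point is gone: callers request code through PrepareCode and a PrepareCodeConfig that controls whether precompiled code may be used and how the result gets published. A hedged usage sketch, assuming a MethodDesc * pMD already in hand (the names are the ones introduced in this patch):

    // Default configuration: precompiled code allowed, result published as
    // the stable entry point.
    PCODE pCode = pMD->PrepareInitialCode();

    // Specialized callers subclass PrepareCodeConfig and override hooks such
    // as SetNativeCode, as MulticoreJitPrepareCodeConfig does earlier in this
    // patch:
    MulticoreJitPrepareCodeConfig config(pMD);
    pMD->PrepareCode(&config);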
+PCODE MethodDesc::GetPrecompiledCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
PCODE pCode = NULL;
- ULONG sizeOfCode = 0;
-#if defined(FEATURE_INTERPRETER) || defined(FEATURE_TIERED_COMPILATION)
- BOOL fStable = TRUE; // True iff the new code address (to be stored in pCode), is a stable entry point.
+
+#ifdef FEATURE_PREJIT
+ pCode = GetPrecompiledNgenCode();
#endif
-#ifdef FEATURE_INTERPRETER
- PCODE pPreviousInterpStub = NULL;
- BOOL fInterpreted = FALSE;
+
+#ifdef FEATURE_READYTORUN
+ if (pCode == NULL)
+ {
+ pCode = GetPrecompiledR2RCode();
+ if (pCode != NULL)
+ {
+ pConfig->SetNativeCode(pCode, &pCode);
+ }
+ }
+#endif // FEATURE_READYTORUN
+
+ return pCode;
+}
+
+PCODE MethodDesc::GetPrecompiledNgenCode()
+{
+ STANDARD_VM_CONTRACT;
+ PCODE pCode = NULL;
+
+#ifdef FEATURE_PREJIT
+ pCode = GetPreImplementedCode();
+
+#ifdef PROFILING_SUPPORTED
+
+ // The pre-existing cache search callbacks aren't implemented as you might expect.
+ // Instead of raising a cache-search-started event for all methods, we only send the notification
+ // when we already know a pre-compiled version of the method exists. In the NGEN case we also
+ // don't send callbacks unless the method triggers the prestub, which excludes a lot of methods.
+ // From the profiler's perspective this technique is only reliable/predictable when using
+ // profiler-instrumented NGEN images (which virtually no profilers use). As-is the callback only
+ // gives an opportunity for the profiler to say whether or not it wants to use the ngen'ed
+ // code.
+ //
+ // Despite those oddities I am leaving this behavior as-is during refactoring because trying to
+ // improve it probably offers little value vs. the potential for compat issues and the added
+ // complexity of reasoning about how the API behavior changed across runtime releases.
+ if (pCode != NULL)
+ {
+ BOOL fShouldSearchCache = TRUE;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->JITCachedFunctionSearchStarted((FunctionID)this, &fShouldSearchCache);
+ END_PIN_PROFILER();
+ }
+
+ if (!fShouldSearchCache)
+ {
+ SetNativeCodeInterlocked(NULL, pCode);
+ _ASSERTE(!IsPreImplemented());
+ pCode = NULL;
+ }
+ }
+#endif // PROFILING_SUPPORTED
+
+ if (pCode != NULL)
+ {
+ LOG((LF_ZAP, LL_INFO10000,
+ "ZAP: Using code" FMT_ADDR "for %s.%s sig=\"%s\" (token %x).\n",
+ DBG_ADDR(pCode),
+ m_pszDebugClassName,
+ m_pszDebugMethodName,
+ m_pszDebugMethodSignature,
+ GetMemberDef()));
+
+ TADDR pFixupList = GetFixupList();
+ if (pFixupList != NULL)
+ {
+ Module *pZapModule = GetZapModule();
+ _ASSERTE(pZapModule != NULL);
+ if (!pZapModule->FixupDelayList(pFixupList))
+ {
+ _ASSERTE(!"FixupDelayList failed");
+ ThrowHR(COR_E_BADIMAGEFORMAT);
+ }
+ }
+
+#ifdef HAVE_GCCOVER
+ if (GCStress<cfg_instr_ngen>::IsEnabled())
+ SetupGcCoverage(this, (BYTE*)pCode);
+#endif // HAVE_GCCOVER
+
+#ifdef PROFILING_SUPPORTED
+ /*
+ * This notifies the profiler that a search to find a
+ * cached jitted function has been made.
+ */
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->
+ JITCachedFunctionSearchFinished((FunctionID)this, COR_PRF_CACHED_FUNCTION_FOUND);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+
+ }
+#endif // FEATURE_PREJIT
+
+ return pCode;
+}
+
+
+PCODE MethodDesc::GetPrecompiledR2RCode()
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+#ifdef FEATURE_READYTORUN
+ Module * pModule = GetModule();
+ if (pModule->IsReadyToRun())
+ {
+ pCode = pModule->GetReadyToRunInfo()->GetEntryPoint(this);
+ }
#endif
+ return pCode;
+}
+PCODE MethodDesc::GetMulticoreJitCode()
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
#ifdef FEATURE_MULTICOREJIT
+ // Quick check, before calling the expensive out-of-line function, for whether this method's domain has code JITted by the background thread
MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
-
- bool fBackgroundThread = flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND);
+ if (mcJitManager.GetMulticoreJitCodeStorage().GetRemainingMethodCount() > 0)
+ {
+ if (MulticoreJitManager::IsMethodSupported(this))
+ {
+ pCode = mcJitManager.RequestMethodCode(this); // Query multi-core JIT manager for compiled code
+ }
+ }
#endif
+ return pCode;
+}
- // If this is the first stage of a tiered compilation progression, use tier0, otherwise
- // use default compilation options
-#ifdef FEATURE_TIERED_COMPILATION
- if (!IsEligibleForTieredCompilation())
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyMetadataILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pDecoderMemory)
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(!IsNoMetadata());
+
+ COR_ILMETHOD_DECODER* pHeader = NULL;
+ COR_ILMETHOD* ilHeader = pConfig->GetILHeader();
+ if (ilHeader == NULL)
+ {
+#ifdef FEATURE_COMINTEROP
+ // Abstract methods can be called through WinRT derivation if the deriving type
+ // is not implemented in managed code and the call comes through the CCW to the
+ // abstract method. Throw a sensible exception in that case.
+ if (GetMethodTable()->IsExportedToWinRT() && IsAbstract())
+ {
+ COMPlusThrowHR(E_NOTIMPL);
+ }
+#endif // FEATURE_COMINTEROP
+
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+
+ COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
+ {
+ // Decoder ctor can AV on a malformed method header
+ AVInRuntimeImplOkayHolder AVOkay;
+ pHeader = new (pDecoderMemory) COR_ILMETHOD_DECODER(ilHeader, GetMDImport(), &status);
+ }
+
+ if (status == COR_ILMETHOD_DECODER::FORMAT_ERROR)
+ {
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
+ }
+
+#ifdef _VER_EE_VERIFICATION_ENABLED
+ static ConfigDWORD peVerify;
+
+ if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
+ Verify(pHeader, TRUE, FALSE); // Throws a VerifierException if verification fails
+#endif // _VER_EE_VERIFICATION_ENABLED
+
+ return pHeader;
+}
+
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyNoMetadataILHeader()
+{
+ STANDARD_VM_CONTRACT;
+
+ if (IsILStub())
+ {
+ ILStubResolver* pResolver = AsDynamicMethodDesc()->GetILStubResolver();
+ return pResolver->GetILHeader();
+ }
+ else
+ {
+ return NULL;
+ }
+
+ // NoMetadata currently doesn't verify the IL. I'm not sure if that was
+ // a deliberate decision in the past or not, but I've left the behavior
+ // as-is during refactoring.
+}
+
+COR_ILMETHOD_DECODER* MethodDesc::GetAndVerifyILHeader(PrepareCodeConfig* pConfig, COR_ILMETHOD_DECODER* pIlDecoderMemory)
+{
+ STANDARD_VM_CONTRACT;
+ _ASSERTE(IsIL() || IsNoMetadata());
+
+ if (IsNoMetadata())
{
- fStable = TRUE;
+ // The NoMetadata version already has a decoder to use; it doesn't need the stack-allocated one
+ return GetAndVerifyNoMetadataILHeader();
}
else
{
- fStable = FALSE;
- flags.Add(CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_TIER0));
+ return GetAndVerifyMetadataILHeader(pConfig, pIlDecoderMemory);
}
+}
+
+// ********************************************************************
+// README!!
+// ********************************************************************
+
+// JitCompileCode is the thread-safe way to invoke the JIT compiler.
+// If multiple threads get in here for the same config, ALL of them
+// MUST return the SAME value for pcode.
+//
+// This function creates a deadlock-aware list of methods being jitted
+// which prevents us from trying to JIT the same method more than once.
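+//
+// A minimal sketch of the intended call pattern (hypothetical caller; in this
+// change see PrepareInitialCode further below for a real entry point):
+//
+//   PrepareCodeConfig config(codeVersion, TRUE /* mcj notification */, TRUE /* may use precompiled */);
+//   PCODE pCode = pMD->JitCompileCode(&config);
+//   // every thread racing on the same config observes the same pCode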
+
+PCODE MethodDesc::JitCompileCode(PrepareCodeConfig* pConfig)
+{
+ STANDARD_VM_CONTRACT;
+
+ LOG((LF_JIT, LL_INFO1000000,
+ "JitCompileCode(" FMT_ADDR ", %s) for %s:%s\n",
+ DBG_ADDR(this),
+ IsILStub() ? " TRUE" : "FALSE",
+ GetMethodTable()->GetDebugClassName(),
+ m_pszDebugMethodName));
+
+#if defined(FEATURE_JIT_PITCHING)
+ CheckStacksAndPitch();
#endif
+ PCODE pCode = NULL;
{
// Enter the global lock which protects the list of all functions being JITd
- ListLockHolder pJitLock (GetDomain()->GetJitLock());
+ JitListLock::LockHolder pJitLock(GetDomain()->GetJitLock());
// It is possible that another thread stepped in before we entered the global lock for the first time.
- pCode = GetNativeCode();
- if (pCode != NULL)
+ if ((pCode = pConfig->IsJitCancellationRequested()))
{
-#ifdef FEATURE_INTERPRETER
- if (Interpreter::InterpretationStubToMethodInfo(pCode) == this)
- {
- pPreviousInterpStub = pCode;
- }
- else
-#endif // FEATURE_INTERPRETER
- goto Done;
+ return pCode;
}
const char *description = "jit lock";
INDEBUG(description = m_pszDebugMethodName;)
- ListLockEntryHolder pEntry(ListLockEntry::Find(pJitLock, this, description));
+ ReleaseHolder<JitListLockEntry> pEntry(JitListLockEntry::Find(
+ pJitLock, pConfig->GetCodeVersion(), description));
// We have an entry now, we can release the global lock
pJitLock.Release();
// Take the entry lock
{
- ListLockEntryLockHolder pEntryLock(pEntry, FALSE);
+ JitListLockEntry::LockHolder pEntryLock(pEntry, FALSE);
if (pEntryLock.DeadlockAwareAcquire())
{
@@ -374,313 +625,458 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS fla
}
// It is possible that another thread stepped in before we entered the lock.
- pCode = GetNativeCode();
-#ifdef FEATURE_INTERPRETER
- if (pCode != NULL && (pCode != pPreviousInterpStub))
-#else
- if (pCode != NULL)
-#endif // FEATURE_INTERPRETER
+ if ((pCode = pConfig->IsJitCancellationRequested()))
{
- goto Done;
+ return pCode;
}
- SString namespaceOrClassName, methodName, methodSignature;
-
- PCODE pOtherCode = NULL; // Need to move here due to 'goto GotNewCode'
-
-#ifdef FEATURE_MULTICOREJIT
-
- bool fCompiledInBackground = false;
-
- // If not called from multi-core JIT thread,
- if (! fBackgroundThread)
+ pCode = GetMulticoreJitCode();
+ if (pCode != NULL)
{
- // Quick check before calling expensive out of line function on this method's domain has code JITted by background thread
- if (mcJitManager.GetMulticoreJitCodeStorage().GetRemainingMethodCount() > 0)
- {
- if (MulticoreJitManager::IsMethodSupported(this))
- {
- pCode = mcJitManager.RequestMethodCode(this); // Query multi-core JIT manager for compiled code
-
- // Multicore JIT manager starts background thread to pre-compile methods, but it does not back-patch it/notify profiler/notify DAC,
- // Jumtp to GotNewCode to do so
- if (pCode != NULL)
- {
- fCompiledInBackground = true;
-
-#ifdef DEBUGGING_SUPPORTED
- // Notify the debugger of the jitted function
- if (g_pDebugInterface != NULL)
- {
- g_pDebugInterface->JITComplete(this, pCode);
- }
-#endif
-
- goto GotNewCode;
- }
- }
- }
+ pConfig->SetNativeCode(pCode, &pCode);
+ pEntry->m_hrResultCode = S_OK;
+ return pCode;
}
-#endif
-
- if (fIsILStub)
+ else
{
- // we race with other threads to JIT the code for an IL stub and the
- // IL header is released once one of the threads completes. As a result
- // we must be inside the lock to reliably get the IL header for the
- // stub.
-
- ILStubResolver* pResolver = AsDynamicMethodDesc()->GetILStubResolver();
- ILHeader = pResolver->GetILHeader();
+ return JitCompileCodeLockedEventWrapper(pConfig, pEntryLock);
}
+ }
+ }
+}
+
+PCODE MethodDesc::JitCompileCodeLockedEventWrapper(PrepareCodeConfig* pConfig, JitListLockEntry* pEntry)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+ ULONG sizeOfCode = 0;
+ CORJIT_FLAGS flags;
#ifdef MDA_SUPPORTED
- MdaJitCompilationStart* pProbe = MDA_GET_ASSISTANT(JitCompilationStart);
- if (pProbe)
- pProbe->NowCompiling(this);
+ MdaJitCompilationStart* pProbe = MDA_GET_ASSISTANT(JitCompilationStart);
+ if (pProbe)
+ pProbe->NowCompiling(this);
#endif // MDA_SUPPORTED
#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ // For methods with a non-zero rejit id we send ReJITCompilationStarted, otherwise
+ // JITCompilationStarted. It isn't clear if this is the ideal policy for these
+ // notifications yet.
+ ReJITID rejitId = pConfig->GetCodeVersion().GetILCodeVersionId();
+ if (rejitId != 0)
+ {
+ g_profControlBlock.pProfInterface->ReJITCompilationStarted((FunctionID)this,
+ rejitId,
+ TRUE);
+ }
+ else
// If profiling, need to give a chance for a tool to examine and modify
// the IL before it gets to the JIT. This allows one to add probe calls for
// things like code coverage, performance, or whatever.
+ {
+ if (!IsNoMetadata())
{
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID)this, TRUE);
-#ifdef FEATURE_MULTICOREJIT
- // Multicore JIT should be disabled when CORProfilerTrackJITInfo is on
- // But there could be corner case in which profiler is attached when multicore background thread is calling MakeJitWorker
- // Disable this block when calling from multicore JIT background thread
- if (!fBackgroundThread)
-#endif
- {
- if (!IsNoMetadata())
- {
- g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID) this, TRUE);
- // The profiler may have changed the code on the callback. Need to
- // pick up the new code. Note that you have to be fully trusted in
- // this mode and the code will not be verified.
- COR_ILMETHOD *pilHeader = GetILHeader(TRUE);
- new (ILHeader) COR_ILMETHOD_DECODER(pilHeader, GetMDImport(), NULL);
- }
- else
- {
- unsigned int ilSize, unused;
- CorInfoOptions corOptions;
- LPCBYTE ilHeaderPointer = this->AsDynamicMethodDesc()->GetResolver()->GetCodeInfo(&ilSize, &unused, &corOptions, &unused);
-
- g_profControlBlock.pProfInterface->DynamicMethodJITCompilationStarted((FunctionID) this, TRUE, ilHeaderPointer, ilSize);
- }
- }
- END_PIN_PROFILER();
}
-#endif // PROFILING_SUPPORTED
-#ifdef FEATURE_INTERPRETER
- // We move the ETW event for start of JITting inward, after we make the decision
- // to JIT rather than interpret.
-#else // FEATURE_INTERPRETER
- // Fire an ETW event to mark the beginning of JIT'ing
- ETW::MethodLog::MethodJitting(this, &namespaceOrClassName, &methodName, &methodSignature);
-#endif // FEATURE_INTERPRETER
-
-#ifdef FEATURE_STACK_SAMPLING
-#ifdef FEATURE_MULTICOREJIT
- if (!fBackgroundThread)
-#endif // FEATURE_MULTICOREJIT
+ else
{
- StackSampler::RecordJittingInfo(this, flags);
- }
-#endif // FEATURE_STACK_SAMPLING
+ unsigned int ilSize, unused;
+ CorInfoOptions corOptions;
+ LPCBYTE ilHeaderPointer = this->AsDynamicMethodDesc()->GetResolver()->GetCodeInfo(&ilSize, &unused, &corOptions, &unused);
- EX_TRY
- {
- pCode = UnsafeJitFunction(this, ILHeader, flags, &sizeOfCode);
- }
- EX_CATCH
- {
- // If the current thread threw an exception, but a competing thread
- // somehow succeeded at JITting the same function (e.g., out of memory
- // encountered on current thread but not competing thread), then go ahead
- // and swallow this current thread's exception, since we somehow managed
- // to successfully JIT the code on the other thread.
- //
- // Note that if a deadlock cycle is broken, that does not result in an
- // exception--the thread would just pass through the lock and JIT the
- // function in competition with the other thread (with the winner of the
- // race decided later on when we do SetNativeCodeInterlocked). This
- // try/catch is purely to deal with the (unusual) case where a competing
- // thread succeeded where we aborted.
-
- pOtherCode = GetNativeCode();
-
- if (pOtherCode == NULL)
- {
- pEntry->m_hrResultCode = E_FAIL;
- EX_RETHROW;
- }
+ g_profControlBlock.pProfInterface->DynamicMethodJITCompilationStarted((FunctionID)this, TRUE, ilHeaderPointer, ilSize);
}
- EX_END_CATCH(RethrowTerminalExceptions)
+ }
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
- if (pOtherCode != NULL)
- {
- // Somebody finished jitting recursively while we were jitting the method.
- // Just use their method & leak the one we finished. (Normally we hope
- // not to finish our JIT in this case, as we will abort early if we notice
- // a reentrant jit has occurred. But we may not catch every place so we
- // do a definitive final check here.
- pCode = pOtherCode;
- goto Done;
- }
+ if (!ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
+ TRACE_LEVEL_VERBOSE,
+ CLR_JIT_KEYWORD))
+ {
+ pCode = JitCompileCodeLocked(pConfig, pEntry, &sizeOfCode, &flags);
+ }
+ else
+ {
+ SString namespaceOrClassName, methodName, methodSignature;
- _ASSERTE(pCode != NULL);
+ // Methods that may be interpreted defer this notification until
+ // CompileMethodWithEtwWrapper is certain we are jitting and not interpreting.
+ // Some further refactoring could consolidate the notification to always
+ // occur at the point the interpreter does it, but it might be even better
+ // to fix the issues that cause us to avoid generating jit notifications
+ // for interpreted methods in the first place. The interpreter does generate
+ // a small stub of native code but no native-IL mapping.
+#ifndef FEATURE_INTERPRETER
+ ETW::MethodLog::MethodJitting(this,
+ &namespaceOrClassName,
+ &methodName,
+ &methodSignature);
+#endif
-#ifdef HAVE_GCCOVER
- if (GCStress<cfg_instr_jit>::IsEnabled())
- {
- SetupGcCoverage(this, (BYTE*) pCode);
- }
-#endif // HAVE_GCCOVER
+ pCode = JitCompileCodeLocked(pConfig, pEntry, &sizeOfCode, &flags);
+ // Interpreted methods skip this notification
#ifdef FEATURE_INTERPRETER
- // Determine whether the new code address is "stable"...= is not an interpreter stub.
- fInterpreted = (Interpreter::InterpretationStubToMethodInfo(pCode) == this);
- fStable = !fInterpreted;
-#endif // FEATURE_INTERPRETER
-
-#ifdef FEATURE_MULTICOREJIT
-
- // If called from multi-core JIT background thread, store code under lock, delay patching until code is queried from application threads
- if (fBackgroundThread)
- {
- // Fire an ETW event to mark the end of JIT'ing
- ETW::MethodLog::MethodJitted(this, &namespaceOrClassName, &methodName, &methodSignature, pCode, 0 /* ReJITID */);
-
-#ifdef FEATURE_PERFMAP
- // Save the JIT'd method information so that perf can resolve JIT'd call frames.
- PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
+ if (Interpreter::InterpretationStubToMethodInfo(pCode) == NULL)
#endif
-
- mcJitManager.GetMulticoreJitCodeStorage().StoreMethodCode(this, pCode);
-
- goto Done;
- }
+ {
+ // Fire an ETW event to mark the end of JIT'ing
+ ETW::MethodLog::MethodJitted(this,
+ &namespaceOrClassName,
+ &methodName,
+ &methodSignature,
+ pCode,
+ pConfig->GetCodeVersion().GetVersionId());
+ }
-GotNewCode:
-#endif
- // If this function had already been requested for rejit (before its original
- // code was jitted), then give the rejit manager a chance to jump-stamp the
- // code we just compiled so the first thread entering the function will jump
- // to the prestub and trigger the rejit. Note that the PublishMethodHolder takes
- // a lock to avoid a particular kind of rejit race. See
- // code:ReJitManager::PublishMethodHolder::PublishMethodHolder#PublishCode for
- // details on the rejit race.
- //
- // Aside from rejit, performing a SetNativeCodeInterlocked at this point
- // generally ensures that there is only one winning version of the native
- // code. This also avoid races with profiler overriding ngened code (see
- // matching SetNativeCodeInterlocked done after
- // JITCachedFunctionSearchStarted)
-#ifdef FEATURE_INTERPRETER
- PCODE pExpected = pPreviousInterpStub;
- if (pExpected == NULL) pExpected = GetTemporaryEntryPoint();
-#endif
- {
- ReJitPublishMethodHolder publishWorker(this, pCode);
- if (!SetNativeCodeInterlocked(pCode
-#ifdef FEATURE_INTERPRETER
- , pExpected, fStable
-#endif
- ))
- {
- // Another thread beat us to publishing its copy of the JITted code.
- pCode = GetNativeCode();
- goto Done;
- }
- }
+ }
-#ifdef FEATURE_INTERPRETER
- // State for dynamic methods cannot be freed if the method was ever interpreted,
- // since there is no way to ensure that it is not in use at the moment.
- if (IsDynamicMethod() && !fInterpreted && (pPreviousInterpStub == NULL))
- {
- AsDynamicMethodDesc()->GetResolver()->FreeCompileTimeState();
- }
-#endif // FEATURE_INTERPRETER
+#ifdef FEATURE_STACK_SAMPLING
+ StackSampler::RecordJittingInfo(this, flags);
+#endif // FEATURE_STACK_SAMPLING
- // We succeeded in jitting the code, and our jitted code is the one that's going to run now.
- pEntry->m_hrResultCode = S_OK;
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+ // For methods with a non-zero rejit id we send ReJITCompilationFinished, otherwise
+ // JITCompilationFinished. It isn't clear if this is the ideal policy for these
+ // notifications yet.
+ ReJITID rejitId = pConfig->GetCodeVersion().GetILCodeVersionId();
+ if (rejitId != 0)
+ {
- #ifdef PROFILING_SUPPORTED
+ g_profControlBlock.pProfInterface->ReJITCompilationFinished((FunctionID)this,
+ rejitId,
+ S_OK,
+ TRUE);
+ }
+ else
// Notify the profiler that JIT completed.
// Must do this after the address has been set.
// @ToDo: Why must we set the address before notifying the profiler ??
- // Note that if IsInterceptedForDeclSecurity is set no one should access the jitted code address anyway.
+ {
+ if (!IsNoMetadata())
{
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- if (!IsNoMetadata())
- {
- g_profControlBlock.pProfInterface->
- JITCompilationFinished((FunctionID) this,
- pEntry->m_hrResultCode,
- TRUE);
- }
- else
- {
- g_profControlBlock.pProfInterface->DynamicMethodJITCompilationFinished((FunctionID) this, pEntry->m_hrResultCode, TRUE);
- }
- END_PIN_PROFILER();
+ g_profControlBlock.pProfInterface->
+ JITCompilationFinished((FunctionID)this,
+ pEntry->m_hrResultCode,
+ TRUE);
+ }
+ else
+ {
+ g_profControlBlock.pProfInterface->DynamicMethodJITCompilationFinished((FunctionID)this, pEntry->m_hrResultCode, TRUE);
}
+ }
+ END_PIN_PROFILER();
+ }
#endif // PROFILING_SUPPORTED
-#ifdef FEATURE_MULTICOREJIT
- if (! fCompiledInBackground)
-#endif
#ifdef FEATURE_INTERPRETER
- // If we didn't JIT, but rather, created an interpreter stub (i.e., fStable is false), don't tell ETW that we did.
- if (fStable)
-#endif // FEATURE_INTERPRETER
- {
- // Fire an ETW event to mark the end of JIT'ing
- ETW::MethodLog::MethodJitted(this, &namespaceOrClassName, &methodName, &methodSignature, pCode, 0 /* ReJITID */);
+ bool isJittedMethod = (Interpreter::InterpretationStubToMethodInfo(pCode) == NULL);
+#endif
+ // Interpreted methods skip this notification
+#ifdef FEATURE_INTERPRETER
+ if (isJittedMethod)
+#endif
+ {
#ifdef FEATURE_PERFMAP
- // Save the JIT'd method information so that perf can resolve JIT'd call frames.
- PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
+ // Save the JIT'd method information so that perf can resolve JIT'd call frames.
+ PerfMap::LogJITCompiledMethod(this, pCode, sizeOfCode);
#endif
- }
-
+ }
-#ifdef FEATURE_MULTICOREJIT
- // If not called from multi-core JIT thread, not got code from storage, quick check before calling out of line function
- if (! fBackgroundThread && ! fCompiledInBackground && mcJitManager.IsRecorderActive())
+#ifdef FEATURE_MULTICOREJIT
+ // Non-initial code versions and multicore JIT initial compilation both skip this notification
+ if (pConfig->NeedsMulticoreJitNotification())
+ {
+ MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
+ if (mcJitManager.IsRecorderActive())
+ {
+ if (MulticoreJitManager::IsMethodSupported(this))
{
- if (MulticoreJitManager::IsMethodSupported(this))
- {
- mcJitManager.RecordMethodJit(this); // Tell multi-core JIT manager to record method on successful JITting
- }
+ mcJitManager.RecordMethodJit(this); // Tell multi-core JIT manager to record method on successful JITting
}
+ }
+ }
#endif
- if (!fIsILStub)
- {
- // The notification will only occur if someone has registered for this method.
- DACNotifyCompilationFinished(this);
- }
+#ifdef FEATURE_INTERPRETER
+ if (isJittedMethod)
+#endif
+ {
+ // The notification will only occur if someone has registered for this method.
+ DACNotifyCompilationFinished(this);
+ }
+
+ return pCode;
+}
+
+PCODE MethodDesc::JitCompileCodeLocked(PrepareCodeConfig* pConfig, JitListLockEntry* pEntry, ULONG* pSizeOfCode, CORJIT_FLAGS* pFlags)
+{
+ STANDARD_VM_CONTRACT;
+
+ PCODE pCode = NULL;
+
+ // The profiler may have changed the code on the callback. Need to
+ // pick up the new code.
+ COR_ILMETHOD_DECODER ilDecoderTemp;
+ COR_ILMETHOD_DECODER *pilHeader = GetAndVerifyILHeader(pConfig, &ilDecoderTemp);
+ *pFlags = pConfig->GetJitCompilationFlags();
+ PCODE pOtherCode = NULL;
+ EX_TRY
+ {
+ pCode = UnsafeJitFunction(this, pilHeader, *pFlags, pSizeOfCode);
+ }
+ EX_CATCH
+ {
+ // If the current thread threw an exception, but a competing thread
+ // somehow succeeded at JITting the same function (e.g., out of memory
+ // encountered on current thread but not competing thread), then go ahead
+ // and swallow this current thread's exception, since we somehow managed
+ // to successfully JIT the code on the other thread.
+ //
+ // Note that if a deadlock cycle is broken, that does not result in an
+ // exception--the thread would just pass through the lock and JIT the
+ // function in competition with the other thread (with the winner of the
+ // race decided later on when we do SetNativeCodeInterlocked). This
+ // try/catch is purely to deal with the (unusual) case where a competing
+ // thread succeeded where we aborted.
+
+ if (!(pOtherCode = pConfig->IsJitCancellationRequested()))
+ {
+ pEntry->m_hrResultCode = E_FAIL;
+ EX_RETHROW;
}
}
+ EX_END_CATCH(RethrowTerminalExceptions)
-Done:
+ if (pOtherCode != NULL)
+ {
+ // Somebody finished jitting recursively while we were jitting the method.
+ // Just use their method & leak the one we finished. (Normally we hope
+ // not to finish our JIT in this case, as we will abort early if we notice
+ // a reentrant jit has occurred. But we may not catch every place so we
+ // do a definitive final check here.
+ return pOtherCode;
+ }
- // We must have a code by now.
_ASSERTE(pCode != NULL);
+
+ // Aside from rejit, performing a SetNativeCodeInterlocked at this point
+ // generally ensures that there is only one winning version of the native
+ // code. This also avoid races with profiler overriding ngened code (see
+ // matching SetNativeCodeInterlocked done after
+ // JITCachedFunctionSearchStarted)
+ {
+ if (!pConfig->SetNativeCode(pCode, &pOtherCode))
+ {
+ // Another thread beat us to publishing its copy of the JITted code.
+ return pOtherCode;
+ }
+#if defined(FEATURE_JIT_PITCHING)
+ else
+ {
+ SavePitchingCandidate(this, *pSizeOfCode);
+ }
+#endif
+ }
+
+#ifdef HAVE_GCCOVER
+ if (GCStress<cfg_instr_jit>::IsEnabled())
+ {
+ SetupGcCoverage(this, (BYTE*)pCode);
+ }
+#endif // HAVE_GCCOVER
- LOG((LF_CORDB, LL_EVERYTHING, "MethodDesc::MakeJitWorker finished. Stub is" FMT_ADDR "\n",
- DBG_ADDR(pCode)));
+ // We succeeded in jitting the code, and our jitted code is the one that's going to run now.
+ pEntry->m_hrResultCode = S_OK;
return pCode;
}
+
+
+PrepareCodeConfig::PrepareCodeConfig() {}
+
+PrepareCodeConfig::PrepareCodeConfig(NativeCodeVersion codeVersion, BOOL needsMulticoreJitNotification, BOOL mayUsePrecompiledCode) :
+ m_pMethodDesc(codeVersion.GetMethodDesc()),
+ m_nativeCodeVersion(codeVersion),
+ m_needsMulticoreJitNotification(needsMulticoreJitNotification),
+ m_mayUsePrecompiledCode(mayUsePrecompiledCode)
+{}
+
+MethodDesc* PrepareCodeConfig::GetMethodDesc()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodDesc;
+}
+
+PCODE PrepareCodeConfig::IsJitCancellationRequested()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_pMethodDesc->GetNativeCode();
+}
+
+BOOL PrepareCodeConfig::NeedsMulticoreJitNotification()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_needsMulticoreJitNotification;
+}
+
+NativeCodeVersion PrepareCodeConfig::GetCodeVersion()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_nativeCodeVersion;
+}
+
+BOOL PrepareCodeConfig::SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // If this function had already been requested for rejit (before its original
+ // code was jitted), then give the CodeVersionManager a chance to jump-stamp the
+ // code we just compiled so the first thread entering the function will jump
+ // to the prestub and trigger the rejit. Note that the PublishMethodHolder takes
+ // a lock to avoid a particular kind of rejit race. See
+ // code:CodeVersionManager::PublishMethodHolder::PublishMethodHolder#PublishCode for
+ // details on the rejit race.
+ //
+ if (m_pMethodDesc->IsVersionableWithJumpStamp())
+ {
+ PublishMethodHolder publishWorker(GetMethodDesc(), pCode);
+ if (m_pMethodDesc->SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ }
+ else
+ {
+ if (m_pMethodDesc->SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ }
+
+ *ppAlternateCodeToUse = m_pMethodDesc->GetNativeCode();
+ return FALSE;
+}
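+
+// A sketch of how callers are expected to consume SetNativeCode (variable names
+// are illustrative; JitCompileCodeLocked above is a real call site):
+//
+//   PCODE pAlternate = NULL;
+//   if (!pConfig->SetNativeCode(pCode, &pAlternate))
+//   {
+//       // another thread won the publishing race; run its code instead
+//       pCode = pAlternate;
+//   }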
+
+COR_ILMETHOD* PrepareCodeConfig::GetILHeader()
+{
+ STANDARD_VM_CONTRACT;
+ return m_pMethodDesc->GetILHeader(TRUE);
+}
+
+CORJIT_FLAGS PrepareCodeConfig::GetJitCompilationFlags()
+{
+ STANDARD_VM_CONTRACT;
+
+ CORJIT_FLAGS flags;
+ if (m_pMethodDesc->IsILStub())
+ {
+ ILStubResolver* pResolver = m_pMethodDesc->AsDynamicMethodDesc()->GetILStubResolver();
+ flags = pResolver->GetJitFlags();
+ }
+#ifdef FEATURE_TIERED_COMPILATION
+ flags.Add(TieredCompilationManager::GetJitFlags(m_nativeCodeVersion));
+#endif
+ return flags;
+}
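+
+// A minimal sketch of what the composed flags mean in practice (illustrative;
+// the exact set depends on TieredCompilationManager::GetJitFlags):
+//
+//   CORJIT_FLAGS flags = pConfig->GetJitCompilationFlags();
+//   // for a tier-0 eligible method this typically includes CORJIT_FLAG_TIER0,
+//   // i.e. a fast, minimally optimized initial compile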
+
+BOOL PrepareCodeConfig::MayUsePrecompiledCode()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_mayUsePrecompiledCode;
+}
+
+#ifdef FEATURE_CODE_VERSIONING
+VersionedPrepareCodeConfig::VersionedPrepareCodeConfig() {}
+
+VersionedPrepareCodeConfig::VersionedPrepareCodeConfig(NativeCodeVersion codeVersion) :
+ PrepareCodeConfig(codeVersion, TRUE, FALSE)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ _ASSERTE(!m_nativeCodeVersion.IsDefaultVersion());
+ _ASSERTE(m_pMethodDesc->GetCodeVersionManager()->LockOwnedByCurrentThread());
+ m_ilCodeVersion = m_nativeCodeVersion.GetILCodeVersion();
+}
+
+HRESULT VersionedPrepareCodeConfig::FinishConfiguration()
+{
+ STANDARD_VM_CONTRACT;
+
+ _ASSERTE(!GetMethodDesc()->GetCodeVersionManager()->LockOwnedByCurrentThread());
+
+ // Any code build stages that do just in time configuration should
+ // be configured now
+#ifdef FEATURE_REJIT
+ if (m_ilCodeVersion.GetRejitState() != ILCodeVersion::kStateActive)
+ {
+ ReJitManager::ConfigureILCodeVersion(m_ilCodeVersion);
+ }
+ _ASSERTE(m_ilCodeVersion.GetRejitState() == ILCodeVersion::kStateActive);
+#endif
+
+ return S_OK;
+}
+
+PCODE VersionedPrepareCodeConfig::IsJitCancellationRequested()
+{
+ LIMITED_METHOD_CONTRACT;
+ return m_nativeCodeVersion.GetNativeCode();
+}
+
+BOOL VersionedPrepareCodeConfig::SetNativeCode(PCODE pCode, PCODE * ppAlternateCodeToUse)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ // This isn't the default version, so a jump stamp is never needed
+ _ASSERTE(!m_nativeCodeVersion.IsDefaultVersion());
+ if (m_nativeCodeVersion.SetNativeCodeInterlocked(pCode, NULL))
+ {
+ return TRUE;
+ }
+ else
+ {
+ *ppAlternateCodeToUse = m_nativeCodeVersion.GetNativeCode();
+ return FALSE;
+ }
+}
+
+COR_ILMETHOD* VersionedPrepareCodeConfig::GetILHeader()
+{
+ STANDARD_VM_CONTRACT;
+ return m_ilCodeVersion.GetIL();
+}
+
+CORJIT_FLAGS VersionedPrepareCodeConfig::GetJitCompilationFlags()
+{
+ STANDARD_VM_CONTRACT;
+ CORJIT_FLAGS flags;
+
+#ifdef FEATURE_REJIT
+ DWORD profilerFlags = m_ilCodeVersion.GetJitFlags();
+ flags.Add(ReJitManager::JitFlagsFromProfCodegenFlags(profilerFlags));
+#endif
+
+#ifdef FEATURE_TIERED_COMPILATION
+ flags.Add(TieredCompilationManager::GetJitFlags(m_nativeCodeVersion));
+#endif
+
+ return flags;
+}
+
+#endif //FEATURE_CODE_VERSIONING
+
#ifdef FEATURE_STUBS_AS_IL
// CreateInstantiatingILStubTargetSig:
@@ -1264,21 +1660,6 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
GCStress<cfg_any, EeconfigFastGcSPolicy, CoopGcModePolicy>::MaybeTrigger();
- // Are we in the prestub because of a rejit request? If so, let the ReJitManager
- // take it from here.
- pCode = ReJitManager::DoReJitIfNecessary(this);
- if (pCode != NULL)
- {
- // A ReJIT was performed, so nothing left for DoPrestub() to do. Return now.
- //
- // The stable entrypoint will either be a pointer to the original JITted code
- // (with a jmp at the top to jump to the newly-rejitted code) OR a pointer to any
- // stub code that must be executed first (e.g., a remoting stub), which in turn
- // will call the original JITted code (which then jmps to the newly-rejitted
- // code).
- RETURN GetStableEntryPoint();
- }
-
#ifdef FEATURE_COMINTEROP
/************************** INTEROP *************************/
@@ -1317,40 +1698,54 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
pMT->CheckRunClassInitThrowing();
}
- /************************** BACKPATCHING *************************/
- // See if the addr of code has changed from the pre-stub
-#ifdef FEATURE_INTERPRETER
- if (!IsReallyPointingToPrestub())
-#else
- if (!IsPointingToPrestub())
+
+ /*************************** CALL COUNTER ***********************/
+ // If we are counting calls for tiered compilation, leave the prestub
+ // in place so that we can continue intercepting method invocations.
+ // When the TieredCompilationManager has received enough call notifications
+ // for this method only then do we back-patch it.
+ BOOL fCanBackpatchPrestub = TRUE;
+#ifdef FEATURE_TIERED_COMPILATION
+ BOOL fEligibleForTieredCompilation = IsEligibleForTieredCompilation();
+ if (fEligibleForTieredCompilation)
+ {
+ CallCounter * pCallCounter = GetCallCounter();
+ fCanBackpatchPrestub = pCallCounter->OnMethodCalled(this);
+ }
#endif
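+
+ // In effect (simplified sketch; the real threshold policy lives in
+ // CallCounter::OnMethodCalled):
+ //   fCanBackpatchPrestub == TRUE  -> enough calls observed, OK to back-patch
+ //   fCanBackpatchPrestub == FALSE -> keep the prestub so calls keep counting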
+
+ /*************************** VERSIONABLE CODE *********************/
+
+ BOOL fIsPointingToPrestub = IsPointingToPrestub();
+#ifdef FEATURE_CODE_VERSIONING
+ if (IsVersionableWithPrecode() ||
+ (!fIsPointingToPrestub && IsVersionableWithJumpStamp()))
{
- // If we are counting calls for tiered compilation, leave the prestub
- // in place so that we can continue intercepting method invocations.
- // When the TieredCompilationManager has received enough call notifications
- // for this method only then do we back-patch it.
-#ifdef FEATURE_TIERED_COMPILATION
- PCODE pNativeCode = GetNativeCode();
- if (pNativeCode && IsEligibleForTieredCompilation())
- {
- CallCounter * pCallCounter = GetAppDomain()->GetCallCounter();
- BOOL doBackPatch = pCallCounter->OnMethodCalled(this);
- if (!doBackPatch)
- {
- return pNativeCode;
- }
- }
+ pCode = GetCodeVersionManager()->PublishVersionableCodeIfNecessary(this, fCanBackpatchPrestub);
+ fIsPointingToPrestub = IsPointingToPrestub();
+ }
#endif
+
+ /************************** BACKPATCHING *************************/
+ // See if the addr of code has changed from the pre-stub
+ if (!fIsPointingToPrestub)
+ {
LOG((LF_CLASSLOADER, LL_INFO10000,
" In PreStubWorker, method already jitted, backpatching call point\n"));
-
+#if defined(FEATURE_JIT_PITCHING)
+ MarkMethodNotPitchingCandidate(this);
+#endif
RETURN DoBackpatch(pMT, pDispatchingMT, TRUE);
}
-
- // record if remoting needs to intercept this call
- BOOL fRemotingIntercepted = IsRemotingInterceptedViaPrestub();
-
- BOOL fReportCompilationFinished = FALSE;
+
+ if (pCode)
+ {
+ // The only reason we are still pointing to the prestub is that the call
+ // counter prevented back-patching. We should still short-circuit and
+ // return the code without back-patching.
+ _ASSERTE(!fCanBackpatchPrestub);
+ RETURN pCode;
+ }
/************************** CODE CREATION *************************/
if (IsUnboxingStub())
@@ -1365,209 +1760,11 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
#endif // defined(FEATURE_SHARE_GENERIC_CODE)
else if (IsIL() || IsNoMetadata())
{
- // remember if we need to backpatch the MethodTable slot
- BOOL fBackpatch = !fRemotingIntercepted
- && IsNativeCodeStableAfterInit();
-
-#ifdef FEATURE_PREJIT
- //
- // See if we have any prejitted code to use.
- //
-
- pCode = GetPreImplementedCode();
-
-#ifdef PROFILING_SUPPORTED
- if (pCode != NULL)
- {
- BOOL fShouldSearchCache = TRUE;
-
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
- g_profControlBlock.pProfInterface->
- JITCachedFunctionSearchStarted((FunctionID) this,
- &fShouldSearchCache);
- END_PIN_PROFILER();
- }
-
- if (!fShouldSearchCache)
- {
-#ifdef FEATURE_INTERPRETER
- SetNativeCodeInterlocked(NULL, pCode, FALSE);
-#else
- SetNativeCodeInterlocked(NULL, pCode);
-#endif
- _ASSERTE(!IsPreImplemented());
- pCode = NULL;
- }
- }
-#endif // PROFILING_SUPPORTED
-
- if (pCode != NULL)
+ if (!IsNativeCodeStableAfterInit())
{
- LOG((LF_ZAP, LL_INFO10000,
- "ZAP: Using code" FMT_ADDR "for %s.%s sig=\"%s\" (token %x).\n",
- DBG_ADDR(pCode),
- m_pszDebugClassName,
- m_pszDebugMethodName,
- m_pszDebugMethodSignature,
- GetMemberDef()));
-
- TADDR pFixupList = GetFixupList();
- if (pFixupList != NULL)
- {
- Module *pZapModule = GetZapModule();
- _ASSERTE(pZapModule != NULL);
- if (!pZapModule->FixupDelayList(pFixupList))
- {
- _ASSERTE(!"FixupDelayList failed");
- ThrowHR(COR_E_BADIMAGEFORMAT);
- }
- }
-
-#ifdef HAVE_GCCOVER
- if (GCStress<cfg_instr_ngen>::IsEnabled())
- SetupGcCoverage(this, (BYTE*) pCode);
-#endif // HAVE_GCCOVER
-
-#ifdef PROFILING_SUPPORTED
- /*
- * This notifies the profiler that a search to find a
- * cached jitted function has been made.
- */
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
- g_profControlBlock.pProfInterface->
- JITCachedFunctionSearchFinished((FunctionID) this, COR_PRF_CACHED_FUNCTION_FOUND);
- END_PIN_PROFILER();
- }
-#endif // PROFILING_SUPPORTED
- }
-
- //
- // If not, try to jit it
- //
-
-#endif // FEATURE_PREJIT
-
-#ifdef FEATURE_READYTORUN
- if (pCode == NULL)
- {
- Module * pModule = GetModule();
- if (pModule->IsReadyToRun())
- {
- pCode = pModule->GetReadyToRunInfo()->GetEntryPoint(this);
- if (pCode != NULL)
- fReportCompilationFinished = TRUE;
- }
+ GetOrCreatePrecode();
}
-#endif // FEATURE_READYTORUN
-
- if (pCode == NULL)
- {
- NewHolder<COR_ILMETHOD_DECODER> pHeader(NULL);
- // Get the information on the method
- if (!IsNoMetadata())
- {
- COR_ILMETHOD* ilHeader = GetILHeader(TRUE);
- if(ilHeader == NULL)
- {
-#ifdef FEATURE_COMINTEROP
- // Abstract methods can be called through WinRT derivation if the deriving type
- // is not implemented in managed code, and calls through the CCW to the abstract
- // method. Throw a sensible exception in that case.
- if (pMT->IsExportedToWinRT() && IsAbstract())
- {
- COMPlusThrowHR(E_NOTIMPL);
- }
-#endif // FEATURE_COMINTEROP
-
- COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
- }
-
- COR_ILMETHOD_DECODER::DecoderStatus status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
-
- {
- // Decoder ctor can AV on a malformed method header
- AVInRuntimeImplOkayHolder AVOkay;
- pHeader = new COR_ILMETHOD_DECODER(ilHeader, GetMDImport(), &status);
- if(pHeader == NULL)
- status = COR_ILMETHOD_DECODER::FORMAT_ERROR;
- }
-
- if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR &&
- Security::CanSkipVerification(GetModule()->GetDomainAssembly()))
- {
- status = COR_ILMETHOD_DECODER::SUCCESS;
- }
-
- if (status != COR_ILMETHOD_DECODER::SUCCESS)
- {
- if (status == COR_ILMETHOD_DECODER::VERIFICATION_ERROR)
- {
- // Throw a verification HR
- COMPlusThrowHR(COR_E_VERIFICATION);
- }
- else
- {
- COMPlusThrowHR(COR_E_BADIMAGEFORMAT, BFA_BAD_IL);
- }
- }
-
-#ifdef _VER_EE_VERIFICATION_ENABLED
- static ConfigDWORD peVerify;
-
- if (peVerify.val(CLRConfig::EXTERNAL_PEVerify))
- Verify(pHeader, TRUE, FALSE); // Throws a VerifierException if verification fails
-#endif // _VER_EE_VERIFICATION_ENABLED
- } // end if (!IsNoMetadata())
-
- // JIT it
- LOG((LF_CLASSLOADER, LL_INFO1000000,
- " In PreStubWorker, calling MakeJitWorker\n"));
-
- // Create the precode eagerly if it is going to be needed later.
- if (!fBackpatch)
- {
- GetOrCreatePrecode();
- }
-
- // Mark the code as hot in case the method ends up in the native image
- g_IBCLogger.LogMethodCodeAccess(this);
-
- pCode = MakeJitWorker(pHeader, CORJIT_FLAGS());
-
-#ifdef FEATURE_INTERPRETER
- if ((pCode != NULL) && !HasStableEntryPoint())
- {
- // We don't yet have a stable entry point, so don't do backpatching yet.
- // But we do have to handle some extra cases that occur in backpatching.
- // (Perhaps I *should* get to the backpatching code, but in a mode where we know
- // we're not dealing with the stable entry point...)
- if (HasNativeCodeSlot())
- {
- // We called "SetNativeCodeInterlocked" in MakeJitWorker, which updated the native
- // code slot, but I think we also want to update the regular slot...
- PCODE tmpEntry = GetTemporaryEntryPoint();
- PCODE pFound = FastInterlockCompareExchangePointer(GetAddrOfSlot(), pCode, tmpEntry);
- // Doesn't matter if we failed -- if we did, it's because somebody else made progress.
- if (pFound != tmpEntry) pCode = pFound;
- }
-
- // Now we handle the case of a FuncPtrPrecode.
- FuncPtrStubs * pFuncPtrStubs = GetLoaderAllocator()->GetFuncPtrStubsNoCreate();
- if (pFuncPtrStubs != NULL)
- {
- Precode* pFuncPtrPrecode = pFuncPtrStubs->Lookup(this);
- if (pFuncPtrPrecode != NULL)
- {
- // If there is a funcptr precode to patch, attempt to patch it. If we lose, that's OK,
- // somebody else made progress.
- pFuncPtrPrecode->SetTargetInterlocked(pCode);
- }
- }
- }
-#endif // FEATURE_INTERPRETER
- } // end if (pCode == NULL)
+ pCode = PrepareInitialCode();
} // end else if (IsIL() || IsNoMetadata())
else if (IsNDirect())
{
@@ -1603,13 +1800,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
}
/************************** POSTJIT *************************/
-#ifndef FEATURE_INTERPRETER
_ASSERTE(pCode == NULL || GetNativeCode() == NULL || pCode == GetNativeCode());
-#else // FEATURE_INTERPRETER
- // Interpreter adds a new possiblity == someone else beat us to installing an intepreter stub.
- _ASSERTE(pCode == NULL || GetNativeCode() == NULL || pCode == GetNativeCode()
- || Interpreter::InterpretationStubToMethodInfo(pCode) == this);
-#endif // FEATURE_INTERPRETER
// At this point we must have either a pointer to managed code or to a stub. All of the above code
// should have thrown an exception if it couldn't make a stub.
@@ -1638,42 +1829,15 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
MemoryBarrier();
#endif
- // If we are counting calls for tiered compilation, leave the prestub
- // in place so that we can continue intercepting method invocations.
- // When the TieredCompilationManager has received enough call notifications
- // for this method only then do we back-patch it.
-#ifdef FEATURE_TIERED_COMPILATION
- if (pCode && IsEligibleForTieredCompilation())
- {
- CallCounter * pCallCounter = GetAppDomain()->GetCallCounter();
- BOOL doBackPatch = pCallCounter->OnMethodCalled(this);
- if (!doBackPatch)
- {
- return pCode;
- }
- }
-#endif
-
if (pCode != NULL)
{
if (HasPrecode())
GetPrecode()->SetTargetInterlocked(pCode);
else
- if (!HasStableEntryPoint())
- {
- // Is the result an interpreter stub?
-#ifdef FEATURE_INTERPRETER
- if (Interpreter::InterpretationStubToMethodInfo(pCode) == this)
- {
- SetEntryPointInterlocked(pCode);
- }
- else
-#endif // FEATURE_INTERPRETER
+ if (!HasStableEntryPoint())
{
- ReJitPublishMethodHolder publishWorker(this, pCode);
SetStableEntryPointInterlocked(pCode);
}
- }
}
else
{
@@ -1690,15 +1854,8 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
}
}
-#ifdef FEATURE_INTERPRETER
- _ASSERTE(!IsReallyPointingToPrestub());
-#else // FEATURE_INTERPRETER
_ASSERTE(!IsPointingToPrestub());
_ASSERTE(HasStableEntryPoint());
-#endif // FEATURE_INTERPRETER
-
- if (fReportCompilationFinished)
- DACNotifyCompilationFinished(this);
RETURN DoBackpatch(pMT, pDispatchingMT, FALSE);
}
@@ -2127,6 +2284,10 @@ EXTERN_C PCODE STDCALL ExternalMethodFixupWorker(TransitionBlock * pTransitionBl
pCode = PatchNonVirtualExternalMethod(pMD, pCode, pImportSection, pIndirection);
}
}
+
+#if defined (FEATURE_JIT_PITCHING)
+ DeleteFromPitchingCandidate(pMD);
+#endif
}
// Force a GC on every jit if the stress level is high enough
@@ -2385,6 +2546,7 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
pResult->signature = NULL;
pResult->indirectFirstOffset = 0;
+ pResult->indirectSecondOffset = 0;
pResult->indirections = CORINFO_USEHELPER;
@@ -2424,6 +2586,9 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
//
// Optimization cases
//
+ // TODO-ARM: Once the optimization cases are implemented in CreateDictionaryLookupHelper,
+ // this ifndef for ARM can be removed.
+#ifndef _TARGET_ARM_
if (signatureKind == ENCODE_TYPE_HANDLE)
{
SigPointer sigptr(pBlob, -1);
@@ -2457,9 +2622,16 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
IfFailThrow(sigptr.GetData(&data));
pResult->offsets[2] = sizeof(TypeHandle) * data;
+ if (MethodTable::IsPerInstInfoRelative())
+ {
+ pResult->indirectFirstOffset = 1;
+ pResult->indirectSecondOffset = 1;
+ }
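+
+ // For illustration: when these indirect flags are set, the consumer of the
+ // lookup is expected to decode each flagged slot as a self-relative pointer
+ // rather than an absolute one, roughly (sketch, hypothetical names):
+ //   absolute:  pNext = *(TADDR*)(pCur + offset);
+ //   relative:  pNext = (pCur + offset) + *(TADDR*)(pCur + offset);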
+
return;
}
}
+#endif // !_TARGET_ARM_
if (pContextMT != NULL && pContextMT->GetNumDicts() > 0xFFFF)
ThrowHR(COR_E_BADIMAGEFORMAT);
@@ -2502,6 +2674,12 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
// Next indirect through the dictionary appropriate to this instantiated type
pResult->offsets[1] = sizeof(TypeHandle*) * (pContextMT->GetNumDicts() - 1);
+ if (MethodTable::IsPerInstInfoRelative())
+ {
+ pResult->indirectFirstOffset = 1;
+ pResult->indirectSecondOffset = 1;
+ }
+
*pDictionaryIndexAndSlot |= dictionarySlot;
}
}
diff --git a/src/vm/profilingenumerators.cpp b/src/vm/profilingenumerators.cpp
index 5044eb7c2b..2406f5aa42 100644
--- a/src/vm/profilingenumerators.cpp
+++ b/src/vm/profilingenumerators.cpp
@@ -79,7 +79,7 @@ BOOL ProfilerFunctionEnum::Init(BOOL fWithReJITIDs)
if (fWithReJITIDs)
{
// This guy causes triggering and locking, while the non-rejitid case does not.
- element->reJitId = pMD->GetReJitManager()->GetReJitId(pMD, heapIterator.GetMethodCode());
+ element->reJitId = ReJitManager::GetReJitId(pMD, heapIterator.GetMethodCode());
}
else
{
diff --git a/src/vm/proftoeeinterfaceimpl.cpp b/src/vm/proftoeeinterfaceimpl.cpp
index cfd99adf27..3958bdf354 100644
--- a/src/vm/proftoeeinterfaceimpl.cpp
+++ b/src/vm/proftoeeinterfaceimpl.cpp
@@ -986,7 +986,7 @@ HRESULT AllowObjectInspection()
#endif // PROFILING_SUPPORTED
-#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+#if defined(PROFILING_SUPPORTED) || defined(FEATURE_EVENT_TRACE)
//---------------------------------------------------------------------------------------
//
@@ -2117,7 +2117,7 @@ HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP2(LPCBYTE ip, FunctionID * pFunc
if (pReJitId != NULL)
{
MethodDesc * pMD = codeInfo.GetMethodDesc();
- *pReJitId = pMD->GetReJitManager()->GetReJitId(pMD, codeInfo.GetStartAddress());
+ *pReJitId = ReJitManager::GetReJitId(pMD, codeInfo.GetStartAddress());
}
return S_OK;
@@ -2592,13 +2592,24 @@ HRESULT ProfToEEInterfaceImpl::GetCodeInfo3(FunctionID functionId,
hr = ValidateParametersForGetCodeInfo(pMethodDesc, cCodeInfos, codeInfos);
if (SUCCEEDED(hr))
{
- hr = GetCodeInfoFromCodeStart(
- // Note here that we must consult the rejit manager to determine the code
- // start address
- pMethodDesc->GetReJitManager()->GetCodeStart(pMethodDesc, reJitId),
- cCodeInfos,
- pcCodeInfos,
- codeInfos);
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ ILCodeVersion ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMethodDesc, reJitId);
+
+ // Now that tiered compilation can create more than one jitted code version for the same
+ // rejit id, we arbitrarily choose the first one to return. To return all of them we'd
+ // presumably need a new profiler API.
+ NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMethodDesc);
+ for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++)
+ {
+ PCODE pCodeStart = iter->GetNativeCode();
+ hr = GetCodeInfoFromCodeStart(
+ pCodeStart,
+ cCodeInfos,
+ pcCodeInfos,
+ codeInfos);
+ break;
+ }
+
}
}
EX_CATCH_HRESULT(hr);
@@ -6425,7 +6436,7 @@ HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP3(LPCBYTE ip, FunctionID * pFunc
if (pReJitId != NULL)
{
MethodDesc * pMD = codeInfo.GetMethodDesc();
- *pReJitId = pMD->GetReJitManager()->GetReJitId(pMD, codeInfo.GetStartAddress());
+ *pReJitId = ReJitManager::GetReJitId(pMD, codeInfo.GetStartAddress());
}
return S_OK;
@@ -6832,7 +6843,7 @@ HRESULT ProfToEEInterfaceImpl::GetClassLayout(ClassID classID,
// running into - attempting to get the class layout for all types at module load time.
// If we don't detect this the runtime will AV during the field iteration below. Feel
// free to eliminate this check when a more complete solution is available.
- if (CORCOMPILE_IS_POINTER_TAGGED(*(typeHandle.AsMethodTable()->GetParentMethodTablePtr())))
+ if (typeHandle.AsMethodTable()->GetParentMethodTablePlainOrRelativePointerPtr()->IsTagged())
{
return CORPROF_E_DATAINCOMPLETE;
}
@@ -8239,7 +8250,7 @@ HRESULT ProfToEEInterfaceImpl::GetReJITIDs(
MethodDesc * pMD = FunctionIdToMethodDesc(functionId);
- return pMD->GetReJitManager()->GetReJITIDs(pMD, cReJitIds, pcReJitIds, reJitIds);
+ return ReJitManager::GetReJITIDs(pMD, cReJitIds, pcReJitIds, reJitIds);
}
HRESULT ProfToEEInterfaceImpl::RequestReJIT(ULONG cFunctions, // in
diff --git a/src/vm/readytoruninfo.cpp b/src/vm/readytoruninfo.cpp
index b85cf9a9c3..996a862431 100644
--- a/src/vm/readytoruninfo.cpp
+++ b/src/vm/readytoruninfo.cpp
@@ -483,6 +483,12 @@ PTR_ReadyToRunInfo ReadyToRunInfo::Initialize(Module * pModule, AllocMemTracker
return NULL;
}
+ if (CORProfilerDisableAllNGenImages() || CORProfilerUseProfileImages())
+ {
+ DoLog("Ready to Run disabled - profiler disabled native images");
+ return NULL;
+ }
+
if (g_pConfig->ExcludeReadyToRun(pModule->GetSimpleName()))
{
DoLog("Ready to Run disabled - module on exclusion list");
diff --git a/src/vm/reflectioninvocation.cpp b/src/vm/reflectioninvocation.cpp
index 7f8a9e0075..00556d8805 100644
--- a/src/vm/reflectioninvocation.cpp
+++ b/src/vm/reflectioninvocation.cpp
@@ -12,7 +12,6 @@
#include "method.hpp"
#include "typehandle.h"
#include "field.h"
-#include "security.h"
#include "eeconfig.h"
#include "vars.hpp"
#include "jitinterface.h"
@@ -36,13 +35,13 @@
// it's used for both method and field to signify that no access is allowed
#define INVOCATION_FLAGS_NO_INVOKE 0x00000002
-#define INVOCATION_FLAGS_NEED_SECURITY 0x00000004
+// #define unused 0x00000004
// because field and method are different we can reuse the same bits
//method
#define INVOCATION_FLAGS_IS_CTOR 0x00000010
#define INVOCATION_FLAGS_RISKY_METHOD 0x00000020
-#define INVOCATION_FLAGS_W8P_API 0x00000040
+// #define unused 0x00000040
#define INVOCATION_FLAGS_IS_DELEGATE_CTOR 0x00000080
#define INVOCATION_FLAGS_CONTAINS_STACK_POINTERS 0x00000100
// field
@@ -76,24 +75,6 @@ static TypeHandle NullableTypeOfByref(TypeHandle th) {
return subType;
}
-static void TryDemand(DWORD whatPermission, RuntimeExceptionKind reKind, LPCWSTR wszTag) {
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
-
- EX_TRY {
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, whatPermission);
- }
- EX_CATCH {
- COMPlusThrow(reKind, wszTag);
- }
- EX_END_CATCH_UNREACHABLE
-}
-
static void TryCallMethodWorker(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args, Frame* pDebuggerCatchFrame)
{
// Use static contracts b/c we have SEH.
@@ -128,7 +109,7 @@ static void TryCallMethodWorker(MethodDescCallSite* pMethodCallSite, ARG_SLOT* a
// then transfers that data to the newly produced TargetInvocationException. This one
// doesn't take those same steps.
//
-static void TryCallMethod(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args) {
+static void TryCallMethod(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args, bool wrapExceptions) {
CONTRACTL {
THROWS;
GC_TRIGGERS;
@@ -136,32 +117,39 @@ static void TryCallMethod(MethodDescCallSite* pMethodCallSite, ARG_SLOT* args) {
}
CONTRACTL_END;
- OBJECTREF ppException = NULL;
- GCPROTECT_BEGIN(ppException);
+ if (wrapExceptions)
+ {
+ OBJECTREF ppException = NULL;
+ GCPROTECT_BEGIN(ppException);
- // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
- // which may swallow managed exceptions. The debugger needs this in order to send a
- // CatchHandlerFound (CHF) notification.
- FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;
- EX_TRY {
- TryCallMethodWorker(pMethodCallSite, args, &catchFrame);
- }
- EX_CATCH {
- ppException = GET_THROWABLE();
- _ASSERTE(ppException);
- }
- EX_END_CATCH(RethrowTransientExceptions)
- catchFrame.Pop();
+ // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
+ // which may swallow managed exceptions. The debugger needs this in order to send a
+ // CatchHandlerFound (CHF) notification.
+ FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame;
+ EX_TRY{
+ TryCallMethodWorker(pMethodCallSite, args, &catchFrame);
+ }
+ EX_CATCH{
+ ppException = GET_THROWABLE();
+ _ASSERTE(ppException);
+ }
+ EX_END_CATCH(RethrowTransientExceptions)
+ catchFrame.Pop();
- // It is important to re-throw outside the catch block because re-throwing will invoke
- // the jitter and managed code and will cause us to use more than the backout stack limit.
- if (ppException != NULL)
+ // It is important to re-throw outside the catch block because re-throwing will invoke
+ // the jitter and managed code and will cause us to use more than the backout stack limit.
+ if (ppException != NULL)
+ {
+ // If we get here we need to throw a TargetInvocationException
+ OBJECTREF except = InvokeUtil::CreateTargetExcept(&ppException);
+ COMPlusThrow(except);
+ }
+ GCPROTECT_END();
+ }
+ else
{
- // If we get here we need to throw an TargetInvocationException
- OBJECTREF except = InvokeUtil::CreateTargetExcept(&ppException);
- COMPlusThrow(except);
+ pMethodCallSite->CallWithValueTypes(args);
}
- GCPROTECT_END();
}
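+
+// Call sites pick the wrapping behavior per invocation, e.g. (both appear later
+// in this file):
+//   TryCallMethod(&ctor, &arg, wrapExceptions); // honor the caller's choice
+//   TryCallMethod(&ctor, &arg, true);           // always wrap (legacy behavior)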
@@ -222,55 +210,6 @@ FCIMPL5(Object*, RuntimeFieldHandle::GetValue, ReflectFieldObject *pFieldUNSAFE,
}
FCIMPLEND
-FCIMPL5(void, ReflectionInvocation::PerformVisibilityCheckOnField, FieldDesc *pFieldDesc, Object *target, ReflectClassBaseObject *pDeclaringTypeUNSAFE, DWORD attr, DWORD invocationFlags) {
- CONTRACTL {
- FCALL_CHECK;
- PRECONDITION(CheckPointer(pFieldDesc));
- PRECONDITION(CheckPointer(pDeclaringTypeUNSAFE));
- }
- CONTRACTL_END;
-
-
- REFLECTCLASSBASEREF refDeclaringType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pDeclaringTypeUNSAFE);
-
- TypeHandle declaringType = refDeclaringType->GetType();
- OBJECTREF targetObj = ObjectToOBJECTREF(target);
-
- HELPER_METHOD_FRAME_BEGIN_2(targetObj, refDeclaringType);
-
- if ((invocationFlags & INVOCATION_FLAGS_SPECIAL_FIELD) != 0) {
- // Verify that this is not a Final Field
- if (IsFdInitOnly(attr))
- TryDemand(SECURITY_SERIALIZATION, kFieldAccessException, W("Acc_ReadOnly"));
- if (IsFdHasFieldRVA(attr))
- TryDemand(SECURITY_SKIP_VER, kFieldAccessException, W("Acc_RvaStatic"));
- }
-
- if ((invocationFlags & INVOCATION_FLAGS_NEED_SECURITY) != 0) {
- // Verify the callee/caller access
-
- bool targetRemoted = FALSE;
-
-
- RefSecContext sCtx(InvokeUtil::GetInvocationAccessCheckType(targetRemoted));
-
- MethodTable* pInstanceMT = NULL;
- if (targetObj != NULL && !pFieldDesc->IsStatic()) {
- TypeHandle targetType = targetObj->GetTypeHandle();
- if (!targetType.IsTypeDesc())
- pInstanceMT = targetType.AsMethodTable();
- }
-
- // Perform the normal access check (caller vs field).
- InvokeUtil::CanAccessField(&sCtx,
- declaringType.GetMethodTable(),
- pInstanceMT,
- pFieldDesc);
- }
- HELPER_METHOD_FRAME_END();
-}
-FCIMPLEND
-
FCIMPL2(FC_BOOL_RET, ReflectionInvocation::CanValueSpecialCast, ReflectClassBaseObject *pValueTypeUNSAFE, ReflectClassBaseObject *pTargetTypeUNSAFE) {
CONTRACTL {
FCALL_CHECK;
@@ -296,9 +235,7 @@ FCIMPL2(FC_BOOL_RET, ReflectionInvocation::CanValueSpecialCast, ReflectClassBase
// the object must be an IntPtr or a System.Reflection.Pointer
if (valueType == TypeHandle(MscorlibBinder::GetClass(CLASS__INTPTR))) {
//
- // it's an IntPtr, it's good. Demand SkipVerification and proceed
-
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
+ // it's an IntPtr, it's good.
}
//
// it's a System.Reflection.Pointer object
@@ -307,13 +244,7 @@ FCIMPL2(FC_BOOL_RET, ReflectionInvocation::CanValueSpecialCast, ReflectClassBase
else if (!InvokeUtil::IsVoidPtr(targetType)) {
if (!valueType.CanCastTo(targetType))
ret = FALSE;
- else
- // demand SkipVerification and proceed
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
}
- else
- // demand SkipVerification and proceed
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
} else {
// the field type is an enum or a primitive. To have any chance of assignement the object type must
// be an enum or primitive as well.
@@ -470,8 +401,9 @@ FCIMPL1(Object*, RuntimeTypeHandle::Allocate, ReflectClassBaseObject* pTypeUNSAF
}//Allocate
FCIMPLEND
-FCIMPL4(Object*, RuntimeTypeHandle::CreateInstance, ReflectClassBaseObject* refThisUNSAFE,
+FCIMPL5(Object*, RuntimeTypeHandle::CreateInstance, ReflectClassBaseObject* refThisUNSAFE,
CLR_BOOL publicOnly,
+ CLR_BOOL wrapExceptions,
CLR_BOOL* pbCanBeCached,
MethodDesc** pConstructor) {
CONTRACTL {
@@ -523,10 +455,6 @@ FCIMPL4(Object*, RuntimeTypeHandle::CreateInstance, ReflectClassBaseObject* refT
if (!pClassFactory)
COMPlusThrow(kInvalidComObjectException, IDS_EE_NO_BACKING_CLASS_FACTORY);
- // Check for the required permissions (SecurityPermission.UnmanagedCode),
- // since arbitrary unmanaged code in the class factory will execute below).
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
-
// create an instance of the Com Object
rv = ((ComClassFactory*)pClassFactory)->CreateInstance(NULL);
@@ -539,11 +467,6 @@ FCIMPL4(Object*, RuntimeTypeHandle::CreateInstance, ReflectClassBaseObject* refT
else
#endif // FEATURE_COMINTEROP
{
- // If we are creating a COM object which has backing metadata we still
- // need to ensure that the caller has unmanaged code access permission.
- if (pVMT->IsComObjectType())
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
-
// if this is an abstract class then we will fail this
if (pVMT->IsAbstract()) {
if (pVMT->IsInterface())
@@ -585,17 +508,17 @@ FCIMPL4(Object*, RuntimeTypeHandle::CreateInstance, ReflectClassBaseObject* refT
else // !pVMT->HasDefaultConstructor()
{
pMeth = pVMT->GetDefaultConstructor();
-
+
// Validate the method can be called by this caller
DWORD attr = pMeth->GetAttrs();
if (!IsMdPublic(attr) && publicOnly)
- COMPlusThrow(kMissingMethodException,W("Arg_NoDefCTor"));
+ COMPlusThrow(kMissingMethodException, W("Arg_NoDefCTor"));
// We've got the class, lets allocate it and call the constructor
OBJECTREF o;
bool remoting = false;
-
+
o = AllocateObject(pVMT);
GCPROTECT_BEGIN(o);
@@ -609,7 +532,7 @@ FCIMPL4(Object*, RuntimeTypeHandle::CreateInstance, ReflectClassBaseObject* refT
arg = ObjToArgSlot(o);
// Call the method
- TryCallMethod(&ctor, &arg);
+ TryCallMethod(&ctor, &arg, wrapExceptions);
rv = o;
GCPROTECT_END();
@@ -674,7 +597,7 @@ FCIMPL2(Object*, RuntimeTypeHandle::CreateInstanceForGenericType, ReflectClassBa
ARG_SLOT arg = ObjToArgSlot(gc.rv);
// Call the method
- TryCallMethod(&ctor, &arg);
+ TryCallMethod(&ctor, &arg, true);
HELPER_METHOD_FRAME_END();
return OBJECTREFToObject(gc.rv);
@@ -767,15 +690,6 @@ FCIMPL1(DWORD, ReflectionInvocation::GetSpecialSecurityFlags, ReflectMethodObjec
if (InvokeUtil::IsDangerousMethod(pMethod))
dwFlags |= INVOCATION_FLAGS_RISKY_METHOD;
- // Is there a link demand?
- if (pMethod->RequiresLinktimeCheck()) {
- dwFlags |= INVOCATION_FLAGS_NEED_SECURITY;
- }
- else
- if (Security::IsMethodCritical(pMethod) && !Security::IsMethodSafeCritical(pMethod)) {
- dwFlags |= INVOCATION_FLAGS_NEED_SECURITY;
- }
-
HELPER_METHOD_FRAME_END();
return dwFlags;
}
@@ -846,8 +760,6 @@ OBJECTREF InvokeArrayConstructor(ArrayTypeDesc* arrayDesc, MethodDesc* pMeth, PT
// If we're trying to create an array of pointers or function pointers,
// check that the caller has skip verification permission.
CorElementType et = arrayDesc->GetArrayElementTypeHandle().GetVerifierCorElementType();
- if (et == ELEMENT_TYPE_PTR || et == ELEMENT_TYPE_FNPTR)
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_SKIP_VER);
// Validate the argCnt an the Rank. Also allow nested SZARRAY's.
_ASSERTE(argCnt == (int) arrayDesc->GetRank() || argCnt == (int) arrayDesc->GetRank() * 2 ||
@@ -1122,8 +1034,9 @@ void DECLSPEC_NORETURN ThrowInvokeMethodException(MethodDesc * pMethod, OBJECTRE
GCPROTECT_END();
}
-FCIMPL4(Object*, RuntimeMethodHandle::InvokeMethod,
- Object *target, PTRArray *objs, SignatureNative* pSigUNSAFE, CLR_BOOL fConstructor)
+FCIMPL5(Object*, RuntimeMethodHandle::InvokeMethod,
+ Object *target, PTRArray *objs, SignatureNative* pSigUNSAFE,
+ CLR_BOOL fConstructor, CLR_BOOL fWrapExceptions)
{
FCALL_CONTRACT;
@@ -1429,30 +1342,38 @@ FCIMPL4(Object*, RuntimeMethodHandle::InvokeMethod,
FrameWithCookie<ProtectValueClassFrame>(pThread, pValueClasses);
}
- // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
- // which may swallow managed exceptions. The debugger needs this in order to send a
- // CatchHandlerFound (CHF) notification.
- FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame(pThread);
-
// Call the method
bool fExceptionThrown = false;
- EX_TRY_THREAD(pThread) {
- CallDescrWorkerReflectionWrapper(&callDescrData, &catchFrame);
- } EX_CATCH {
- // Rethrow transient exceptions for constructors for backward compatibility
- if (fConstructor && GET_EXCEPTION()->IsTransient())
- {
- EX_RETHROW;
- }
+ if (fWrapExceptions)
+ {
+ // The sole purpose of having this frame is to tell the debugger that we have a catch handler here
+ // which may swallow managed exceptions. The debugger needs this in order to send a
+ // CatchHandlerFound (CHF) notification.
+ FrameWithCookie<DebuggerU2MCatchHandlerFrame> catchFrame(pThread);
+
+ EX_TRY_THREAD(pThread) {
+ CallDescrWorkerReflectionWrapper(&callDescrData, &catchFrame);
+ } EX_CATCH {
+ // Rethrow transient exceptions for constructors for backward compatibility
+ if (fConstructor && GET_EXCEPTION()->IsTransient())
+ {
+ EX_RETHROW;
+ }
// Abuse retval to store the exception object
gc.retVal = GET_THROWABLE();
_ASSERTE(gc.retVal);
fExceptionThrown = true;
- } EX_END_CATCH(SwallowAllExceptions);
+ } EX_END_CATCH(SwallowAllExceptions);
+
+ catchFrame.Pop(pThread);
+ }
+ else
+ {
+ CallDescrWorkerWithHandler(&callDescrData);
+ }
- catchFrame.Pop(pThread);
// Now that we are safely out of the catch block, we can create and raise the
// TargetInvocationException.
@@ -1826,12 +1747,6 @@ FCIMPL5(void, RuntimeFieldHandle::SetValueDirect, ReflectFieldObject *pFieldUNSA
// Verify that this is not a Final Field
DWORD attr = pField->GetAttributes(); // should we cache?
- if (IsFdInitOnly(attr)) {
- TryDemand(SECURITY_SERIALIZATION, kFieldAccessException, W("Acc_ReadOnly"));
- }
- if (IsFdHasFieldRVA(attr)) {
- TryDemand(SECURITY_SKIP_VER, kFieldAccessException, W("Acc_RvaStatic"));
- }
if (IsFdLiteral(attr))
COMPlusThrow(kFieldAccessException,W("Acc_ReadOnly"));
@@ -2581,10 +2496,6 @@ FCIMPL8(Object*, ReflectionInvocation::InvokeDispMethod, ReflectClassBaseObject*
_ASSERTE(gc.target != NULL);
_ASSERTE(gc.target->GetMethodTable()->IsComObjectType());
- // Unless security is turned off, we need to validate that the calling code
- // has unmanaged code access privilege.
- Security::SpecialDemand(SSWT_LATEBOUND_LINKDEMAND, SECURITY_UNMANAGED_CODE);
-
WORD flags = 0;
if (invokeAttr & BINDER_InvokeMethod)
flags |= DISPATCH_METHOD;
diff --git a/src/vm/reflectioninvocation.h b/src/vm/reflectioninvocation.h
index 206e7516be..6a183b134c 100644
--- a/src/vm/reflectioninvocation.h
+++ b/src/vm/reflectioninvocation.h
@@ -38,6 +38,7 @@
#define BINDER_OptionalParamBinding 0x040000
#define BINDER_IgnoreReturn 0x1000000
+#define BINDER_DoNotWrapExceptions 0x2000000
#define BINDER_DefaultLookup (BINDER_Instance | BINDER_Static | BINDER_Public)
 #define BINDER_AllLookup (BINDER_Instance | BINDER_Static | BINDER_Public | BINDER_NonPublic)
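A hedged sketch of how the new flag is expected to feed the FCall's `fWrapExceptions` argument; only the constant comes from the define above, the helper name is invented:

    #include <cstdint>

    constexpr uint32_t kBinderDoNotWrapExceptions = 0x2000000; // BINDER_DoNotWrapExceptions

    // Hypothetical helper: wrapping stays the default, the flag opts out.
    constexpr bool ShouldWrapExceptions(uint32_t invokeAttr) {
        return (invokeAttr & kBinderDoNotWrapExceptions) == 0;
    }

    static_assert(ShouldWrapExceptions(0), "default is to wrap");
    static_assert(!ShouldWrapExceptions(kBinderDoNotWrapExceptions), "flag opts out");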
@@ -80,8 +81,6 @@ public:
static FCDECL4(void, PerformSecurityCheck, Object *target, MethodDesc *pMeth, ReflectClassBaseObject *pParent, DWORD dwFlags);
static FCDECL2(void, CheckArgs, PTRArray *objs, SignatureNative sig);
- static FCDECL5(void, PerformVisibilityCheckOnField, FieldDesc *fieldDesc, Object *target, ReflectClassBaseObject *pDeclaringType, DWORD attr, DWORD invocationFlags);
-
static void PrepareDelegateHelper(OBJECTREF* pDelegate, BOOL onlyContractedMethod);
static void CanCacheTargetAndCrackedSig(MethodDesc* pMD);
};
diff --git a/src/vm/rejit.cpp b/src/vm/rejit.cpp
index 7bbd0e2f71..2a9c9e78a3 100644
--- a/src/vm/rejit.cpp
+++ b/src/vm/rejit.cpp
@@ -37,7 +37,7 @@
// appropriate IL and codegen flags, calling UnsafeJitFunction(), and redirecting the
// jump-stamp from the prestub to the newly-rejitted code.
//
-// * code:ReJitPublishMethodHolder::ReJitPublishMethodHolder
+// * code:PublishMethodHolder::PublishMethodHolder
// MethodDesc::MakeJitWorker() calls this to determine if there's an outstanding
// "pre-rejit" request for a MethodDesc that has just been jitted for the first time. We
// also call this from MethodDesc::CheckRestore when restoring generic methods.
@@ -48,8 +48,8 @@
// the PCODE, which is required to avoid races with a profiler that calls RequestReJIT
// just as the method finishes compiling/restoring.
//
-// * code:ReJitPublishMethodTableHolder::ReJitPublishMethodTableHolder
-// Does the same thing as ReJitPublishMethodHolder except iterating over every
+// * code:PublishMethodTableHolder::PublishMethodTableHolder
+// Does the same thing as PublishMethodHolder except iterating over every
// method in the MethodTable. This is called from MethodTable::SetIsRestored.
//
// * code:ReJitManager::GetCurrentReJitFlags:
@@ -156,20 +156,21 @@
#include "threadsuspend.h"
#ifdef FEATURE_REJIT
+#ifdef FEATURE_CODE_VERSIONING
#include "../debug/ee/debugger.h"
#include "../debug/ee/walker.h"
#include "../debug/ee/controller.h"
+#include "codeversion.h"
-// This HRESULT is only used as a private implementation detail. If it escapes functions
-// defined in this file it is a bug. Corerror.xml has a comment in it reserving this
-// value for our use but it doesn't appear in the public headers.
+// This HRESULT is only used as a private implementation detail. Corerror.xml has a comment in it
+// reserving this value for our use but it doesn't appear in the public headers.
#define CORPROF_E_RUNTIME_SUSPEND_REQUIRED 0x80131381
// This is just used as a unique id. Overflow is OK. If we happen to have more than 4+Billion rejits
// and somehow manage to not run out of memory, we'll just have to redefine ReJITID as size_t.
/* static */
-ReJITID SharedReJitInfo::s_GlobalReJitId = 1;
+static ReJITID s_GlobalReJitId = 1;
/* static */
CrstStatic ReJitManager::s_csGlobalRequest;
@@ -178,19 +179,20 @@ CrstStatic ReJitManager::s_csGlobalRequest;
//---------------------------------------------------------------------------------------
// Helpers
-inline CORJIT_FLAGS JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
+// static
+CORJIT_FLAGS ReJitManager::JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
{
LIMITED_METHOD_DAC_CONTRACT;
CORJIT_FLAGS jitFlags;
-
- // Note: COR_PRF_CODEGEN_DISABLE_INLINING is checked in
- // code:CEEInfo::canInline#rejit (it has no equivalent CORJIT flag).
-
if ((dwCodegenFlags & COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS) != 0)
{
jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE);
}
+ if ((dwCodegenFlags & COR_PRF_CODEGEN_DISABLE_INLINING) != 0)
+ {
+ jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING);
+ }
// In the future more flags may be added that need to be converted here (e.g.,
// COR_PRF_CODEGEN_ENTERLEAVE / CORJIT_FLAG_PROF_ENTERLEAVE)
@@ -199,94 +201,6 @@ inline CORJIT_FLAGS JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
}
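As a sanity check, this standalone model mirrors the translation above; the enum values are invented stand-ins for the real COR_PRF_* and CORJIT_* constants that live in the profiler and JIT headers.

    #include <cassert>
    #include <cstdint>

    enum ProfFlags : uint32_t {
        PROF_DISABLE_ALL_OPTIMIZATIONS = 0x1,
        PROF_DISABLE_INLINING          = 0x2,
    };
    enum JitFlags : uint32_t {
        JIT_DEBUG_CODE  = 0x1,
        JIT_NO_INLINING = 0x2,
    };

    uint32_t TranslateFlags(uint32_t prof) {
        uint32_t jit = 0;
        if (prof & PROF_DISABLE_ALL_OPTIMIZATIONS) jit |= JIT_DEBUG_CODE;
        // New in this change: inlining suppression now has a direct CORJIT
        // equivalent instead of being special-cased in the inlining policy.
        if (prof & PROF_DISABLE_INLINING) jit |= JIT_NO_INLINING;
        return jit;
    }

    int main() {
        assert(TranslateFlags(PROF_DISABLE_INLINING) == JIT_NO_INLINING);
        assert(TranslateFlags(PROF_DISABLE_ALL_OPTIMIZATIONS | PROF_DISABLE_INLINING)
               == (JIT_DEBUG_CODE | JIT_NO_INLINING));
        return 0;
    }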
//---------------------------------------------------------------------------------------
-// Allocation helpers used by ReJitInfo / SharedReJitInfo to ensure they
-// stick stuff on the appropriate loader heap.
-
-void * LoaderHeapAllocatedRejitStructure::operator new (size_t size, LoaderHeap * pHeap, const NoThrow&)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- INJECT_FAULT(return NULL;);
- PRECONDITION(CheckPointer(pHeap));
- }
- CONTRACTL_END;
-
-#ifdef DACCESS_COMPILE
- return ::operator new(size, nothrow);
-#else
- return pHeap->AllocMem_NoThrow(S_SIZE_T(size));
-#endif
-}
-
-void * LoaderHeapAllocatedRejitStructure::operator new (size_t size, LoaderHeap * pHeap)
-{
- CONTRACTL
- {
- THROWS;
- GC_NOTRIGGER;
- MODE_ANY;
- INJECT_FAULT(COMPlusThrowOM());
- PRECONDITION(CheckPointer(pHeap));
- }
- CONTRACTL_END;
-
-#ifdef DACCESS_COMPILE
- return ::operator new(size);
-#else
- return pHeap->AllocMem(S_SIZE_T(size));
-#endif
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Simple, thin abstraction of debugger breakpoint patching. Given an address and a
-// previously procured DebuggerControllerPatch governing the code address, this decides
-// whether the code address is patched. If so, it returns a pointer to the debugger's
-// buffer (of what's "underneath" the int 3 patch); otherwise, it returns the code
-// address itself.
-//
-// Arguments:
-// * pbCode - Code address to return if unpatched
-// * dbgpatch - DebuggerControllerPatch to test
-//
-// Return Value:
-// Either pbCode or the debugger's patch buffer, as per description above.
-//
-// Assumptions:
-// Caller must manually grab (and hold) the ControllerLockHolder and get the
-// DebuggerControllerPatch before calling this helper.
-//
-// Notes:
-// pbCode need not equal the code address governed by dbgpatch, but is always
-// "related" (and sometimes really is equal). For example, this helper may be used
-// when writing a code byte to an internal rejit buffer (e.g., in preparation for an
-// eventual 64-bit interlocked write into the code stream), and thus pbCode would
-// point into the internal rejit buffer whereas dbgpatch governs the corresponding
-// code byte in the live code stream. This function would then be used to determine
-// whether a byte should be written into the internal rejit buffer OR into the
-// debugger controller's breakpoint buffer.
-//
-
-LPBYTE FirstCodeByteAddr(LPBYTE pbCode, DebuggerControllerPatch * dbgpatch)
-{
- LIMITED_METHOD_CONTRACT;
-
- if (dbgpatch != NULL && dbgpatch->IsActivated())
- {
- // Debugger has patched the code, so return the address of the buffer
- return LPBYTE(&(dbgpatch->opcode));
- }
-
- // no active patch, just return the direct code address
- return pbCode;
-}
-
-
-//---------------------------------------------------------------------------------------
// ProfilerFunctionControl implementation
ProfilerFunctionControl::ProfilerFunctionControl(LoaderHeap * pHeap) :
@@ -532,30 +446,6 @@ COR_IL_MAP* ProfilerFunctionControl::GetInstrumentedMapEntries()
#ifndef DACCESS_COMPILE
//---------------------------------------------------------------------------------------
-// Called by the prestub worker, this function is a simple wrapper which determines the
-// appropriate ReJitManager, and then calls DoReJitIfNecessaryWorker() on it. See the
-// comment at the top of code:ReJitManager::DoReJitIfNecessaryWorker for more info,
-// including parameter & return value descriptions.
-
-// static
-PCODE ReJitManager::DoReJitIfNecessary(PTR_MethodDesc pMD)
-{
- STANDARD_VM_CONTRACT;
-
- if (!pMD->HasNativeCode())
- {
- // If method hasn't been jitted yet, the prestub worker should just continue as
- // usual.
- return NULL;
- }
-
- // We've already published the JITted code for this MethodDesc, and yet we're
- // back in the prestub (who called us). Ask the appropriate rejit manager if that's because of a rejit request. If so, the
- // ReJitManager will take care of the rejit now
- return pMD->GetReJitManager()->DoReJitIfNecessaryWorker(pMD);
-}
-
-//---------------------------------------------------------------------------------------
//
// ICorProfilerInfo4::RequestReJIT calls into this guy to do most of the
// work. Takes care of finding the appropriate ReJitManager instances to
@@ -579,6 +469,18 @@ HRESULT ReJitManager::RequestReJIT(
ModuleID rgModuleIDs[],
mdMethodDef rgMethodDefs[])
{
+ return ReJitManager::UpdateActiveILVersions(cFunctions, rgModuleIDs, rgMethodDefs, NULL, FALSE);
+}
+
+
+ // static
+HRESULT ReJitManager::UpdateActiveILVersions(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[],
+ HRESULT rgHrStatuses[],
+ BOOL fIsRevert)
+{
CONTRACTL
{
NOTHROW;
@@ -599,20 +501,12 @@ HRESULT ReJitManager::RequestReJIT(
// Temporary storage to batch up all the ReJitInfos that will get jump stamped
// later when the runtime is suspended.
//
- //BUGBUG: Its not clear to me why it is safe to hold ReJitInfo* lists
- // outside the table locks. If an AppDomain unload occurred I don't see anything
- // that prevents them from being deleted. If this is a bug it is a pre-existing
- // condition and nobody has reported it as an issue yet. AppDomainExit probably
- // needs to synchronize with something.
- // Jan also pointed out the ModuleIDs have the same issue, in order to use this
- // function safely the profiler needs prevent the AppDomain which contains the
- // modules from being unloaded. I doubt any profilers are doing this intentionally
- // but calling from within typical callbacks like ModuleLoadFinished or
- // JIT events would do it for the current domain I think. Of course RequestRejit
- // could always be called with ModuleIDs in some other AppDomain.
- //END BUGBUG
- SHash<ReJitManagerJumpStampBatchTraits> mgrToJumpStampBatch;
- CDynArray<ReJitReportErrorWorkItem> errorRecords;
+ //DESKTOP WARNING: On CoreCLR we are safe, but if this code ever gets ported back
+ //to desktop there aren't any protections against domain unload. Any of these
+ //moduleIDs, code version managers, or code versions would become invalid if the
+ //domain that contains them was unloaded.
+ SHash<CodeActivationBatchTraits> mgrToCodeActivationBatch;
+ CDynArray<CodeVersionManager::CodePublishError> errorRecords;
for (ULONG i = 0; i < cFunctions; i++)
{
Module * pModule = reinterpret_cast< Module * >(rgModuleIDs[i]);
@@ -660,13 +554,13 @@ HRESULT ReJitManager::RequestReJIT(
}
}
- ReJitManager * pReJitMgr = pModule->GetReJitManager();
- _ASSERTE(pReJitMgr != NULL);
- ReJitManagerJumpStampBatch * pJumpStampBatch = mgrToJumpStampBatch.Lookup(pReJitMgr);
- if (pJumpStampBatch == NULL)
+ CodeVersionManager * pCodeVersionManager = pModule->GetCodeVersionManager();
+ _ASSERTE(pCodeVersionManager != NULL);
+ CodeActivationBatch * pCodeActivationBatch = mgrToCodeActivationBatch.Lookup(pCodeVersionManager);
+ if (pCodeActivationBatch == NULL)
{
- pJumpStampBatch = new (nothrow)ReJitManagerJumpStampBatch(pReJitMgr);
- if (pJumpStampBatch == NULL)
+ pCodeActivationBatch = new (nothrow)CodeActivationBatch(pCodeVersionManager);
+ if (pCodeActivationBatch == NULL)
{
return E_OUTOFMEMORY;
}
@@ -676,7 +570,7 @@ HRESULT ReJitManager::RequestReJIT(
{
// This guy throws when out of memory, but remains internally
// consistent (without adding the new element)
- mgrToJumpStampBatch.Add(pJumpStampBatch);
+ mgrToCodeActivationBatch.Add(pCodeActivationBatch);
}
EX_CATCH_HRESULT(hr);
@@ -687,133 +581,24 @@ HRESULT ReJitManager::RequestReJIT(
}
}
-
- // At this stage, pMD may be NULL or non-NULL, and the specified function may or
- // may not be a generic (or a function on a generic class). The operations
- // below depend on these conditions as follows:
- //
- // (1) If pMD == NULL || PMD has no code || pMD is generic
- // Do a "PRE-REJIT" (add a placeholder ReJitInfo that points to module/token;
- // there's nothing to jump-stamp)
- //
- // (2) IF pMD != NULL, but not generic (or function on generic class)
- // Do a REAL REJIT (add a real ReJitInfo that points to pMD and jump-stamp)
- //
- // (3) IF pMD != NULL, and is a generic (or function on generic class)
- // Do a real rejit (including jump-stamp) for all already-jitted instantiations.
-
- BaseDomain * pBaseDomainFromModule = pModule->GetDomain();
- SharedReJitInfo * pSharedInfo = NULL;
{
- CrstHolder ch(&(pReJitMgr->m_crstTable));
-
- // Do a PRE-rejit
- if (pMD == NULL || !pMD->HasNativeCode() || pMD->HasClassOrMethodInstantiation())
- {
- hr = pReJitMgr->MarkForReJit(
- pModule,
- rgMethodDefs[i],
- pJumpStampBatch,
- &errorRecords,
- &pSharedInfo);
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
-
- if (pMD == NULL)
- {
- // nothing is loaded yet so only the pre-rejit placeholder is needed. We're done for this method.
- continue;
- }
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
- if (!pMD->HasClassOrMethodInstantiation() && pMD->HasNativeCode())
+ // Bind the il code version
+ ILCodeVersion* pILCodeVersion = pCodeActivationBatch->m_methodsToActivate.Append();
+ if (pILCodeVersion == NULL)
{
- // We have a JITted non-generic. Easy case. Just mark the JITted method
- // desc as needing to be rejitted
- hr = pReJitMgr->MarkForReJit(
- pMD,
- pSharedInfo,
- pJumpStampBatch,
- &errorRecords,
- NULL); // Don't need the SharedReJitInfo to be returned
-
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
+ return E_OUTOFMEMORY;
}
-
- if (!pMD->HasClassOrMethodInstantiation())
+ if (fIsRevert)
{
- // not generic, we're done for this method
- continue;
- }
-
- // Ok, now the case of a generic function (or function on generic class), which
- // is loaded, and may thus have compiled instantiations.
- // It's impossible to get to any other kind of domain from the profiling API
- _ASSERTE(pBaseDomainFromModule->IsAppDomain() ||
- pBaseDomainFromModule->IsSharedDomain());
-
- if (pBaseDomainFromModule->IsSharedDomain())
- {
- // Iterate through all modules loaded into the shared domain, to
- // find all instantiations living in the shared domain. This will
- // include orphaned code (i.e., shared code used by ADs that have
- // all unloaded), which is good, because orphaned code could get
- // re-adopted if a new AD is created that can use that shared code
- hr = pReJitMgr->MarkAllInstantiationsForReJit(
- pSharedInfo,
- NULL, // NULL means to search SharedDomain instead of an AD
- pModule,
- rgMethodDefs[i],
- pJumpStampBatch,
- &errorRecords);
+ // activate the original version
+ *pILCodeVersion = ILCodeVersion(pModule, rgMethodDefs[i]);
}
else
{
- // Module is unshared, so just use the module's domain to find instantiations.
- hr = pReJitMgr->MarkAllInstantiationsForReJit(
- pSharedInfo,
- pBaseDomainFromModule->AsAppDomain(),
- pModule,
- rgMethodDefs[i],
- pJumpStampBatch,
- &errorRecords);
- }
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
-
- // We want to iterate through all compilations of existing instantiations to
- // ensure they get marked for rejit. Note: There may be zero instantiations,
- // but we won't know until we try.
- if (pBaseDomainFromModule->IsSharedDomain())
- {
- // Iterate through all real domains, to find shared instantiations.
- AppDomainIterator appDomainIterator(TRUE);
- while (appDomainIterator.Next())
- {
- AppDomain * pAppDomain = appDomainIterator.GetDomain();
- if (pAppDomain->IsUnloading())
- {
- continue;
- }
- CrstHolder ch(&(pReJitMgr->m_crstTable));
- hr = pReJitMgr->MarkAllInstantiationsForReJit(
- pSharedInfo,
- pAppDomain,
- pModule,
- rgMethodDefs[i],
- pJumpStampBatch,
- &errorRecords);
+ // activate an unused or new IL version
+ hr = ReJitManager::BindILVersion(pCodeVersionManager, pModule, rgMethodDefs[i], pILCodeVersion);
if (FAILED(hr))
{
_ASSERTE(hr == E_OUTOFMEMORY);
@@ -823,18 +608,18 @@ HRESULT ReJitManager::RequestReJIT(
}
} // for (ULONG i = 0; i < cFunctions; i++)
- // For each rejit mgr, if there's work to do, suspend EE if needed,
- // enter the rejit mgr's crst, and do the batched work.
+ // For each code versioning mgr, if there's work to do, suspend EE if needed,
+ // enter the code versioning mgr's crst, and do the batched work.
BOOL fEESuspended = FALSE;
- SHash<ReJitManagerJumpStampBatchTraits>::Iterator beginIter = mgrToJumpStampBatch.Begin();
- SHash<ReJitManagerJumpStampBatchTraits>::Iterator endIter = mgrToJumpStampBatch.End();
- for (SHash<ReJitManagerJumpStampBatchTraits>::Iterator iter = beginIter; iter != endIter; iter++)
+ SHash<CodeActivationBatchTraits>::Iterator beginIter = mgrToCodeActivationBatch.Begin();
+ SHash<CodeActivationBatchTraits>::Iterator endIter = mgrToCodeActivationBatch.End();
+ for (SHash<CodeActivationBatchTraits>::Iterator iter = beginIter; iter != endIter; iter++)
{
- ReJitManagerJumpStampBatch * pJumpStampBatch = *iter;
- ReJitManager * pMgr = pJumpStampBatch->pReJitManager;
+ CodeActivationBatch * pCodeActivationBatch = *iter;
+ CodeVersionManager * pCodeVersionManager = pCodeActivationBatch->m_pCodeVersionManager;
- int cBatchedPreStubMethods = pJumpStampBatch->preStubMethods.Count();
- if (cBatchedPreStubMethods == 0)
+ int cMethodsToActivate = pCodeActivationBatch->m_methodsToActivate.Count();
+ if (cMethodsToActivate == 0)
{
continue;
}
@@ -842,14 +627,12 @@ HRESULT ReJitManager::RequestReJIT(
{
// As a potential future optimization we could speculatively try to update the jump stamps without
// suspending the runtime. That needs to be plumbed through BatchUpdateJumpStamps though.
-
ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
fEESuspended = TRUE;
}
- CrstHolder ch(&(pMgr->m_crstTable));
_ASSERTE(ThreadStore::HoldingThreadStore());
- hr = pMgr->BatchUpdateJumpStamps(&(pJumpStampBatch->undoMethods), &(pJumpStampBatch->preStubMethods), &errorRecords);
+ hr = pCodeVersionManager->SetActiveILCodeVersions(pCodeActivationBatch->m_methodsToActivate.Ptr(), pCodeActivationBatch->m_methodsToActivate.Count(), fEESuspended, &errorRecords);
if (FAILED(hr))
break;
}
@@ -867,702 +650,72 @@ HRESULT ReJitManager::RequestReJIT(
// Report any errors that were batched up
for (int i = 0; i < errorRecords.Count(); i++)
{
- ReportReJITError(&(errorRecords[i]));
- }
-
- INDEBUG(SharedDomain::GetDomain()->GetReJitManager()->Dump(
- "Finished RequestReJIT(). Dumping Shared ReJitManager\n"));
-
- // We got through processing everything, but profiler will need to see the individual ReJITError
- // callbacks to know what, if anything, failed.
- return S_OK;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Helper used by ReJitManager::RequestReJIT to jump stamp all the methods that were
-// specified by the caller. Also used by RejitManager::DoJumpStampForAssemblyIfNecessary
-// when rejitting a batch of generic method instantiations in a newly loaded NGEN assembly.
-//
-// This method is responsible for calling ReJITError on the profiler if anything goes
-// wrong.
-//
-// Arguments:
-// * pUndoMethods - array containing the methods that need the jump stamp removed
-// * pPreStubMethods - array containing the methods that need to be jump stamped to prestub
-// * pErrors - any errors will be appended to this array
-//
-// Returns:
-// S_OK - all methods are updated or added an error to the pErrors array
-// E_OUTOFMEMORY - some methods neither updated nor added an error to pErrors array
-// ReJitInfo state remains consistent
-//
-// Assumptions:
-// 1) Caller prevents contention by either:
-// a) Suspending the runtime
-// b) Ensuring all methods being updated haven't been published
-//
-HRESULT ReJitManager::BatchUpdateJumpStamps(CDynArray<ReJitInfo *> * pUndoMethods, CDynArray<ReJitInfo *> * pPreStubMethods, CDynArray<ReJitReportErrorWorkItem> * pErrors)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- PRECONDITION(CheckPointer(pUndoMethods));
- PRECONDITION(CheckPointer(pPreStubMethods));
- PRECONDITION(CheckPointer(pErrors));
- }
- CONTRACTL_END;
-
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
- HRESULT hr = S_OK;
-
- ReJitInfo ** ppInfoEnd = pUndoMethods->Ptr() + pUndoMethods->Count();
- for (ReJitInfo ** ppInfoCur = pUndoMethods->Ptr(); ppInfoCur < ppInfoEnd; ppInfoCur++)
- {
- // If we are undoing jumpstamps they have been published already
- // and our caller is holding the EE suspended
- _ASSERTE(ThreadStore::HoldingThreadStore());
- if (FAILED(hr = (*ppInfoCur)->UndoJumpStampNativeCode(TRUE)))
- {
- if (FAILED(hr = AddReJITError(*ppInfoCur, hr, pErrors)))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
- }
-
- ppInfoEnd = pPreStubMethods->Ptr() + pPreStubMethods->Count();
- for (ReJitInfo ** ppInfoCur = pPreStubMethods->Ptr(); ppInfoCur < ppInfoEnd; ppInfoCur++)
- {
- if (FAILED(hr = (*ppInfoCur)->JumpStampNativeCode()))
- {
- if (FAILED(hr = AddReJITError(*ppInfoCur, hr, pErrors)))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
- }
- return S_OK;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Helper used by ReJitManager::RequestReJIT to iterate through any generic
-// instantiations of a function in a given AppDomain, and to create the corresponding
-// ReJitInfos for those MethodDescs. This also adds corresponding entries to a temporary
-// dynamic array created by our caller for batching up the jump-stamping we'll need to do
-// later.
-//
-// This method is responsible for calling ReJITError on the profiler if anything goes
-// wrong.
-//
-// Arguments:
-// * pSharedForAllGenericInstantiations - The SharedReJitInfo for this mdMethodDef's
-// rejit request. This is what we must associate any newly-created ReJitInfo with.
-// * pAppDomainToSearch - AppDomain in which to search for generic instantiations
-// matching the specified methodDef. If it is NULL, then we'll search for all
-// MethodDescs whose metadata definition appears in a Module loaded into the
-// SharedDomain (regardless of which ADs--if any--are using those MethodDescs).
-// This captures the case of domain-neutral code that was in use by an AD that
-// unloaded, and may come into use again once a new AD loads that can use the
-// shared code.
-// * pModuleContainingMethodDef - Module* containing the specified methodDef token.
-// * methodDef - Token for the method for which we're searching for MethodDescs.
-// * pJumpStampBatch - Batch we're responsible for placing ReJitInfo's into, on which
-// the caller will update the jump stamps.
-// * pRejitErrors - Dynamic array we're responsible for adding error records into.
-// The caller will report them to the profiler outside the table lock
-//
-// Returns:
-// S_OK - all methods were either marked for rejit OR have appropriate error records
-// in pRejitErrors
-// E_OUTOFMEMORY - some methods weren't marked for rejit AND we didn't have enough
-// memory to create the error records
-//
-// Assumptions:
-// * This function should only be called on the ReJitManager that owns the (generic)
-// definition of methodDef
-// * If pModuleContainingMethodDef is loaded into the SharedDomain, then
-// pAppDomainToSearch may be NULL (to search all instantiations loaded shared),
-// or may be non-NULL (to search all instantiations loaded into
-// pAppDomainToSearch)
-// * If pModuleContainingMethodDef is not loaded domain-neutral, then
-// pAppDomainToSearch must be non-NULL (and, indeed, must be the very AD that
-// pModuleContainingMethodDef is loaded into).
-//
-
-HRESULT ReJitManager::MarkAllInstantiationsForReJit(
- SharedReJitInfo * pSharedForAllGenericInstantiations,
- AppDomain * pAppDomainToSearch,
- PTR_Module pModuleContainingMethodDef,
- mdMethodDef methodDef,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- CAN_TAKE_LOCK;
- PRECONDITION(CheckPointer(pSharedForAllGenericInstantiations));
- PRECONDITION(CheckPointer(pAppDomainToSearch, NULL_OK));
- PRECONDITION(CheckPointer(pModuleContainingMethodDef));
- PRECONDITION(CheckPointer(pJumpStampBatch));
- }
- CONTRACTL_END;
-
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
- _ASSERTE(methodDef != mdTokenNil);
- _ASSERTE(pJumpStampBatch->pReJitManager == this);
-
- HRESULT hr;
-
- BaseDomain * pDomainContainingGenericDefinition = pModuleContainingMethodDef->GetDomain();
-
-#ifdef _DEBUG
- // This function should only be called on the ReJitManager that owns the (generic)
- // definition of methodDef
- _ASSERTE(this == pDomainContainingGenericDefinition->GetReJitManager());
-
- // If the generic definition is not loaded domain-neutral, then all its
- // instantiations will also be non-domain-neutral and loaded into the same
- // domain as the generic definition. So the caller may only pass the
- // domain containing the generic definition as pAppDomainToSearch
- if (!pDomainContainingGenericDefinition->IsSharedDomain())
- {
- _ASSERTE(pDomainContainingGenericDefinition == pAppDomainToSearch);
- }
-#endif //_DEBUG
-
- // If pAppDomainToSearch is NULL, iterate through all existing
- // instantiations loaded into the SharedDomain. If pAppDomainToSearch is non-NULL,
- // iterate through all existing instantiations in pAppDomainToSearch, and only consider
- // instantiations in non-domain-neutral assemblies (as we already covered domain
- // neutral assemblies when we searched the SharedDomain).
- LoadedMethodDescIterator::AssemblyIterationMode mode = LoadedMethodDescIterator::kModeSharedDomainAssemblies;
- // these are the default flags which won't actually be used in shared mode other than
- // asserting they were specified with their default values
- AssemblyIterationFlags assemFlags = (AssemblyIterationFlags) (kIncludeLoaded | kIncludeExecution);
- ModuleIterationOption moduleFlags = (ModuleIterationOption) kModIterIncludeLoaded;
- if (pAppDomainToSearch != NULL)
- {
- mode = LoadedMethodDescIterator::kModeUnsharedADAssemblies;
- assemFlags = (AssemblyIterationFlags)(kIncludeAvailableToProfilers | kIncludeExecution);
- moduleFlags = (ModuleIterationOption)kModIterIncludeAvailableToProfilers;
- }
- LoadedMethodDescIterator it(
- pAppDomainToSearch,
- pModuleContainingMethodDef,
- methodDef,
- mode,
- assemFlags,
- moduleFlags);
- CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly;
- while (it.Next(pDomainAssembly.This()))
- {
- MethodDesc * pLoadedMD = it.Current();
-
- if (!pLoadedMD->HasNativeCode())
- {
- // Skip uninstantiated MethodDescs. The placeholder added by our caller
- // is sufficient to ensure they'll eventually be rejitted when they get
- // compiled.
- continue;
- }
-
- if (FAILED(hr = IsMethodSafeForReJit(pLoadedMD)))
- {
- if (FAILED(hr = AddReJITError(pModuleContainingMethodDef, methodDef, pLoadedMD, hr, pRejitErrors)))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- continue;
- }
-
-#ifdef _DEBUG
- if (!pDomainContainingGenericDefinition->IsSharedDomain())
- {
- // Method is defined outside of the shared domain, so its instantiation must
- // be defined in the AD we're iterating over (pAppDomainToSearch, which, as
- // asserted above, must be the same domain as the generic's definition)
- _ASSERTE(pLoadedMD->GetDomain() == pAppDomainToSearch);
- }
-#endif // _DEBUG
-
- // This will queue up the MethodDesc for rejitting and create all the
- // look-aside tables needed.
- SharedReJitInfo * pSharedUsed = NULL;
- hr = MarkForReJit(
- pLoadedMD,
- pSharedForAllGenericInstantiations,
- pJumpStampBatch,
- pRejitErrors,
- &pSharedUsed);
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- }
-
- return S_OK;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Helper used by ReJitManager::MarkAllInstantiationsForReJit and
-// ReJitManager::RequestReJIT to do the actual ReJitInfo allocation and
-// placement inside m_table. Note that callers don't use MarkForReJitHelper
-// directly. Instead, callers actually use the inlined overloaded wrappers
-// ReJitManager::MarkForReJit (one for placeholder (i.e., methodDef pre-rejit)
-// ReJitInfos and one for regular (i.e., MethodDesc) ReJitInfos). When the
-// overloaded MarkForReJit wrappers call this, they ensure that either pMD is
-// valid XOR (pModule, methodDef) is valid.
-//
-// Arguments:
-// * pMD - MethodDesc for which to find / create ReJitInfo. Only used if
-// we're creating a regular ReJitInfo
-// * pModule - Module for which to find / create ReJitInfo. Only used if
-// we're creating a placeholder ReJitInfo
-// * methodDef - methodDef for which to find / create ReJitInfo. Only used
-// if we're creating a placeholder ReJitInfo
-// * pSharedToReuse - SharedReJitInfo to associate any newly created
-// ReJitInfo with. If NULL, we'll create a new one.
-// * pJumpStampBatch - a batch of methods that need to have jump stamps added
-// or removed. This method will add new ReJitInfos to the batch as needed.
-// * pRejitErrors - An array of rejit errors that this call will append to
-// if there is an error marking
-// * ppSharedUsed - [out]: SharedReJitInfo used for this request. If
-// pSharedToReuse is non-NULL, *ppSharedUsed == pSharedToReuse. Else,
-// *ppSharedUsed is the SharedReJitInfo newly-created to associate with
-// the ReJitInfo used for this request.
-//
-// Return Value:
-// * S_OK: Successfully created a new ReJitInfo to manage this request
-// * S_FALSE: An existing ReJitInfo was already available to manage this
-// request, so we didn't need to create a new one.
-// * E_OUTOFMEMORY
-// * Else, a failure HRESULT indicating what went wrong.
-//
-
-HRESULT ReJitManager::MarkForReJitHelper(
- PTR_MethodDesc pMD,
- PTR_Module pModule,
- mdMethodDef methodDef,
- SharedReJitInfo * pSharedToReuse,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
- /* out */ SharedReJitInfo ** ppSharedUsed)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- CAN_TAKE_LOCK;
- PRECONDITION(CheckPointer(pMD, NULL_OK));
- PRECONDITION(CheckPointer(pModule, NULL_OK));
- PRECONDITION(CheckPointer(pJumpStampBatch));
- PRECONDITION(CheckPointer(pRejitErrors));
- PRECONDITION(CheckPointer(ppSharedUsed, NULL_OK));
- }
- CONTRACTL_END;
-
- CrstHolder ch(&m_crstTable);
-
- // Either pMD is valid, xor (pModule,methodDef) is valid
- _ASSERTE(
- ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
- ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
- _ASSERTE(pJumpStampBatch->pReJitManager == this);
-
- if (ppSharedUsed != NULL)
- *ppSharedUsed = NULL;
- HRESULT hr = S_OK;
-
- // Check if there was there a previous rejit request for pMD
-
- ReJitInfoHash::KeyIterator beginIter(&m_table, TRUE /* begin */);
- ReJitInfoHash::KeyIterator endIter(&m_table, FALSE /* begin */);
-
- if (pMD != NULL)
- {
- beginIter = GetBeginIterator(pMD);
- endIter = GetEndIterator(pMD);
- }
- else
- {
- beginIter = GetBeginIterator(pModule, methodDef);
- endIter = GetEndIterator(pModule, methodDef);
- }
-
- for (ReJitInfoHash::KeyIterator iter = beginIter;
- iter != endIter;
- iter++)
- {
- ReJitInfo * pInfo = *iter;
- _ASSERTE(pInfo->m_pShared != NULL);
-
-#ifdef _DEBUG
- if (pMD != NULL)
- {
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- }
- else
- {
- Module * pModuleTest = NULL;
- mdMethodDef methodDefTest = mdTokenNil;
- pInfo->GetModuleAndToken(&pModuleTest, &methodDefTest);
- _ASSERTE((pModule == pModuleTest) && (methodDef == methodDefTest));
- }
-#endif //_DEBUG
-
- SharedReJitInfo * pShared = pInfo->m_pShared;
-
- switch (pShared->GetState())
- {
- case SharedReJitInfo::kStateRequested:
- // We can 'reuse' this instance because the profiler doesn't know about
- // it yet. (This likely happened because a profiler called RequestReJIT
- // twice in a row, without us having a chance to jmp-stamp the code yet OR
- // while iterating through instantiations of a generic, the iterator found
- // duplicate entries for the same instantiation.)
- _ASSERTE(pShared->m_pbIL == NULL);
- _ASSERTE(pInfo->m_pCode == NULL);
-
- if (ppSharedUsed != NULL)
- *ppSharedUsed = pShared;
-
- INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
- return S_FALSE;
-
- case SharedReJitInfo::kStateGettingReJITParameters:
- case SharedReJitInfo::kStateActive:
+ if (rgHrStatuses != NULL)
{
- // Profiler has already requested to rejit this guy, AND we've already
- // at least started getting the rejit parameters from the profiler. We need to revert this
- // instance (this will put back the original code)
-
- INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
- hr = Revert(pShared, pJumpStampBatch);
- if (FAILED(hr))
+ for (DWORD j = 0; j < cFunctions; j++)
{
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
+ if (rgMethodDefs[j] == errorRecords[i].methodDef &&
+ reinterpret_cast<Module*>(rgModuleIDs[j]) == errorRecords[i].pModule)
+ {
+ rgHrStatuses[j] = errorRecords[i].hrStatus;
+ }
}
- _ASSERTE(pShared->GetState() == SharedReJitInfo::kStateReverted);
-
- // No need to continue looping. Break out of loop to create a new
- // ReJitInfo to service the request.
- goto EXIT_LOOP;
- }
- case SharedReJitInfo::kStateReverted:
- // just ignore this guy
- continue;
-
- default:
- UNREACHABLE();
- }
- }
-EXIT_LOOP:
-
- // Either there was no ReJitInfo yet for this MethodDesc OR whatever we've found
- // couldn't be reused (and needed to be reverted). Create a new ReJitInfo to return
- // to the caller.
- //
- // If the caller gave us a pMD that is a new generic instantiation, then the caller
- // may also have provided a pSharedToReuse for the generic. Use that instead of
- // creating a new one.
-
- SharedReJitInfo * pShared = NULL;
-
- if (pSharedToReuse != NULL)
- {
- pShared = pSharedToReuse;
- }
- else
- {
- PTR_LoaderHeap pHeap = NULL;
- if (pModule != NULL)
- {
- pHeap = pModule->GetLoaderAllocator()->GetLowFrequencyHeap();
}
else
{
- pHeap = pMD->GetLoaderAllocator()->GetLowFrequencyHeap();
- }
- pShared = new (pHeap, nothrow) SharedReJitInfo;
- if (pShared == NULL)
- {
- return E_OUTOFMEMORY;
- }
- }
-
- _ASSERTE(pShared != NULL);
-
- // ReJitInfos with MethodDesc's need to be jump-stamped,
- // ReJitInfos with Module/MethodDef are placeholders that don't need a stamp
- ReJitInfo * pInfo = NULL;
- ReJitInfo ** ppInfo = &pInfo;
- if (pMD != NULL)
- {
- ppInfo = pJumpStampBatch->preStubMethods.Append();
- if (ppInfo == NULL)
- {
- return E_OUTOFMEMORY;
+ ReportReJITError(&(errorRecords[i]));
}
+
}
- hr = AddNewReJitInfo(pMD, pModule, methodDef, pShared, ppInfo);
- if (FAILED(hr))
- {
- // NOTE: We could consider using an AllocMemTracker or AllocMemHolder
- // here to back out the allocation of pShared, but it probably
- // wouldn't make much of a difference. We'll only get here if we ran
- // out of memory allocating the pInfo, so our memory has already been
- // blown. We can't cause much leaking due to this error path.
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
-
- _ASSERTE(*ppInfo != NULL);
-
- if (ppSharedUsed != NULL)
- *ppSharedUsed = pShared;
+ // We got through processing everything, but profiler will need to see the individual ReJITError
+ // callbacks to know what, if anything, failed.
return S_OK;
}
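The error fan-out above differs by caller: the revert path supplies `rgHrStatuses` and gets per-method HRESULTs back, while the rejit path reports through the profiler's ReJITError callback. A simplified stand-in for the matching loop (all types are placeholders for `CodeVersionManager::CodePublishError` and friends):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using HR = long;
    struct PublishError { uintptr_t moduleId; uint32_t methodDef; HR hr; };

    // Linear scan matching each batched error back to the caller's parallel
    // arrays, mirroring the rgHrStatuses loop above; batches are small, so
    // O(errors * functions) is acceptable.
    void MapErrorsToStatuses(const std::vector<PublishError>& errors,
                             const uintptr_t* moduleIds,
                             const uint32_t* methodDefs,
                             HR* statuses, size_t count) {
        for (const PublishError& e : errors) {
            for (size_t j = 0; j < count; j++) {
                if (methodDefs[j] == e.methodDef && moduleIds[j] == e.moduleId) {
                    statuses[j] = e.hr;
                }
            }
        }
    }

    int main() {
        std::vector<PublishError> errs = { { 0x10, 0x06000001, (HR)0x80131381 } };
        uintptr_t mods[] = { 0x10 };
        uint32_t  defs[] = { 0x06000001 };
        HR        stat[] = { 0 };
        MapErrorsToStatuses(errs, mods, defs, stat, 1);
        return stat[0] == (HR)0x80131381 ? 0 : 1;
    }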
-//---------------------------------------------------------------------------------------
-//
-// Helper used by the above helpers (and also during jump-stamping) to
-// allocate and store a new ReJitInfo.
-//
-// Arguments:
-// * pMD - MethodDesc for which to create ReJitInfo. Only used if we're
-// creating a regular ReJitInfo
-// * pModule - Module for which create ReJitInfo. Only used if we're
-// creating a placeholder ReJitInfo
-// * methodDef - methodDef for which to create ReJitInfo. Only used if
-// we're creating a placeholder ReJitInfo
-// * pShared - SharedReJitInfo to associate the newly created ReJitInfo
-// with.
-// * ppInfo - [out]: ReJitInfo created
-//
-// Return Value:
-// * S_OK: ReJitInfo successfully created & stored.
-// * Else, failure indicating the problem. Currently only E_OUTOFMEMORY.
-//
-// Assumptions:
-// * Caller should be holding this ReJitManager's table crst.
-//
-
-HRESULT ReJitManager::AddNewReJitInfo(
- PTR_MethodDesc pMD,
+// static
+HRESULT ReJitManager::BindILVersion(
+ CodeVersionManager* pCodeVersionManager,
PTR_Module pModule,
mdMethodDef methodDef,
- SharedReJitInfo * pShared,
- ReJitInfo ** ppInfo)
+ ILCodeVersion *pILCodeVersion)
{
CONTRACTL
{
NOTHROW;
GC_NOTRIGGER;
- MODE_ANY;
- CAN_TAKE_LOCK;
- PRECONDITION(CheckPointer(pMD, NULL_OK));
- PRECONDITION(CheckPointer(pModule, NULL_OK));
- PRECONDITION(CheckPointer(pShared));
- PRECONDITION(CheckPointer(ppInfo));
- }
- CONTRACTL_END;
-
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
- _ASSERTE(pShared->GetState() != SharedReJitInfo::kStateReverted);
-
- // Either pMD is valid, xor (pModule,methodDef) is valid
- _ASSERTE(
- ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
- ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
-
- HRESULT hr;
- ReJitInfo * pInfo = NULL;
-
- if (pMD != NULL)
- {
- PTR_LoaderHeap pHeap = pMD->GetLoaderAllocator()->GetLowFrequencyHeap();
- pInfo = new (pHeap, nothrow) ReJitInfo(pMD, pShared);
- }
- else
- {
- PTR_LoaderHeap pHeap = pModule->GetLoaderAllocator()->GetLowFrequencyHeap();
- pInfo = new (pHeap, nothrow) ReJitInfo(pModule, methodDef, pShared);
- }
- if (pInfo == NULL)
- {
- return E_OUTOFMEMORY;
- }
-
- hr = S_OK;
- EX_TRY
- {
- // This guy throws when out of memory, but remains internally
- // consistent (without adding the new element)
- m_table.Add(pInfo);
- }
- EX_CATCH_HRESULT(hr);
-
- _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
- if (FAILED(hr))
- {
- pInfo = NULL;
- return hr;
- }
-
- *ppInfo = pInfo;
- return S_OK;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Given a MethodDesc, call ReJitInfo::JumpStampNativeCode to stamp the top of its
-// originally-jitted-code with a jmp that goes to the prestub. This is called by the
-// prestub worker after jitting the original code of a function (i.e., the "pre-rejit"
-// scenario). In this case, the EE is not suspended. But that's ok, because the PCODE has
-// not yet been published to the MethodDesc, and no thread can be executing inside the
-// originally JITted function yet.
-//
-// Arguments:
-// * pMD - MethodDesc to jmp-stamp
-// * pCode - Top of the code that was just jitted (using original IL).
-//
-//
-// Return value:
-// * S_OK: Either we successfully did the jmp-stamp, or we didn't have to (e.g., there
-// was no outstanding pre-rejit request for this MethodDesc, or a racing thread
-// took care of it for us).
-// * Else, HRESULT indicating failure.
-
-// Assumptions:
-// The caller has not yet published pCode to the MethodDesc, so no threads can be
-// executing inside pMD's code yet. Thus, we don't need to suspend the runtime while
-// applying the jump-stamp like we usually do for rejit requests that are made after
-// a function has been JITted.
-//
-
-HRESULT ReJitManager::DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
+ MODE_PREEMPTIVE;
CAN_TAKE_LOCK;
- PRECONDITION(CheckPointer(pMD));
- PRECONDITION(pCode != NULL);
+ PRECONDITION(CheckPointer(pCodeVersionManager));
+ PRECONDITION(CheckPointer(pModule));
+ PRECONDITION(CheckPointer(pILCodeVersion));
}
CONTRACTL_END;
- HRESULT hr;
-
- _ASSERTE(IsTableCrstOwnedByCurrentThread());
-
- ReJitInfo * pInfoToJumpStamp = NULL;
-
- // First, try looking up ReJitInfo by MethodDesc. A "regular" MethodDesc-based
- // ReJitInfo already exists for "case 1" (see comment above
- // code:ReJitInfo::JumpStampNativeCode), and could even exist for "case 2"
- // (pre-rejit), if either:
- // * The pre-rejit was requested after the MD had already been loaded (though
- // before it had been jitted) OR
- // * there was a race to JIT the original code for the MD, and another thread got
- // here before us and already added the ReJitInfo for that MD.
-
- ReJitInfoHash::KeyIterator beginIter = GetBeginIterator(pMD);
- ReJitInfoHash::KeyIterator endIter = GetEndIterator(pMD);
-
- pInfoToJumpStamp = FindPreReJittedReJitInfo(beginIter, endIter);
- if (pInfoToJumpStamp != NULL)
- {
- _ASSERTE(pInfoToJumpStamp->GetMethodDesc() == pMD);
- // does it need to be jump-stamped?
- if (pInfoToJumpStamp->GetState() != ReJitInfo::kJumpNone)
- {
- return S_OK;
- }
- else
- {
- return pInfoToJumpStamp->JumpStampNativeCode(pCode);
- }
- }
-
- // In this case, try looking up by module / metadata token. This is the case where
- // the pre-rejit request occurred before the MD was loaded.
-
- Module * pModule = pMD->GetModule();
- _ASSERTE(pModule != NULL);
- mdMethodDef methodDef = pMD->GetMemberDef();
-
- beginIter = GetBeginIterator(pModule, methodDef);
- endIter = GetEndIterator(pModule, methodDef);
- ReJitInfo * pInfoPlaceholder = NULL;
-
- pInfoPlaceholder = FindPreReJittedReJitInfo(beginIter, endIter);
- if (pInfoPlaceholder == NULL)
- {
- // No jump stamping to do.
- return S_OK;
- }
+ _ASSERTE(pCodeVersionManager->LockOwnedByCurrentThread());
+ _ASSERTE((pModule != NULL) && (methodDef != mdTokenNil));
- // The placeholder may already have a rejit info for this MD, in which
- // case we don't need to do any additional work
- for (ReJitInfo * pInfo = pInfoPlaceholder->m_pShared->GetMethods(); pInfo != NULL; pInfo = pInfo->m_pNext)
- {
- if ((pInfo->GetKey().m_keyType == ReJitInfo::Key::kMethodDesc) &&
- (pInfo->GetMethodDesc() == pMD))
- {
- // Any rejit info we find should already be jumpstamped
- _ASSERTE(pInfo->GetState() != ReJitInfo::kJumpNone);
- return S_OK;
- }
- }
+ // Check if there was a previous rejit request for this method that hasn't been exposed back
+ // to the profiler yet
+ ILCodeVersion ilCodeVersion = pCodeVersionManager->GetActiveILCodeVersion(pModule, methodDef);
-#ifdef _DEBUG
+ if (ilCodeVersion.GetRejitState() == ILCodeVersion::kStateRequested)
{
- Module * pModuleTest = NULL;
- mdMethodDef methodDefTest = mdTokenNil;
- INDEBUG(pInfoPlaceholder->GetModuleAndToken(&pModuleTest, &methodDefTest));
- _ASSERTE((pModule == pModuleTest) && (methodDef == methodDefTest));
- }
-#endif //_DEBUG
+ // We can 'reuse' this instance because the profiler doesn't know about
+ // it yet. (This likely happened because a profiler called RequestReJIT
+ // twice in a row, without us having a chance to jmp-stamp the code yet OR
+ // while iterating through instantiations of a generic, the iterator found
+ // duplicate entries for the same instantiation.)
+ _ASSERTE(ilCodeVersion.GetILNoThrow() == NULL);
- // We have finished JITting the original code for a function that had been
- // "pre-rejitted" (i.e., requested to be rejitted before it was first compiled). So
- // now is the first time where we know the MethodDesc of the request.
- if (FAILED(hr = IsMethodSafeForReJit(pMD)))
- {
- // No jump stamping to do.
- return hr;
+ *pILCodeVersion = ilCodeVersion;
+ return S_FALSE;
}
- // Create the ReJitInfo associated with the MethodDesc now (pInfoToJumpStamp), and
- // jump-stamp the original code.
- pInfoToJumpStamp = NULL;
- hr = AddNewReJitInfo(pMD, NULL /*pModule*/, NULL /*methodDef*/, pInfoPlaceholder->m_pShared, &pInfoToJumpStamp);
- if (FAILED(hr))
- {
- return hr;
- }
-
- _ASSERTE(pInfoToJumpStamp != NULL);
- return pInfoToJumpStamp->JumpStampNativeCode(pCode);
+ // Either there was no ILCodeVersion yet for this MethodDesc OR whatever we've found
+ // couldn't be reused (and needed to be reverted). Create a new ILCodeVersion to return
+ // to the caller.
+ return pCodeVersionManager->AddILCodeVersion(pModule, methodDef, InterlockedIncrement(reinterpret_cast<LONG*>(&s_GlobalReJitId)), pILCodeVersion);
}
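A condensed model of BindILVersion's reuse-or-create decision follows; the enum and struct are simplified stand-ins for ILCodeVersion and its rejit state, and the S_FALSE reuse path becomes returning the existing version.

    #include <cassert>
    #include <cstdint>

    enum class RejitState { kRequested, kGettingParameters, kActive, kReverted };

    struct ILVersion { RejitState state; uint32_t rejitId; };

    // Reuse a version the profiler has never been told about; otherwise mint a
    // new one with a fresh id (the InterlockedIncrement of s_GlobalReJitId).
    ILVersion BindILVersion(const ILVersion& active, uint32_t& globalId) {
        if (active.state == RejitState::kRequested) {
            return active; // S_FALSE path: piggyback on the pending request
        }
        return ILVersion{ RejitState::kRequested, ++globalId };
    }

    int main() {
        uint32_t globalId = 1;
        ILVersion pending{ RejitState::kRequested, 7 };
        assert(BindILVersion(pending, globalId).rejitId == 7); // reused
        ILVersion active{ RejitState::kActive, 7 };
        assert(BindILVersion(active, globalId).rejitId == 2);  // new version
        return 0;
    }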
//---------------------------------------------------------------------------------------
@@ -1601,395 +754,41 @@ HRESULT ReJitManager::RequestRevert(
}
CONTRACTL_END;
- // Serialize all RequestReJIT() and Revert() calls against each other (even across AppDomains)
- CrstHolder ch(&(s_csGlobalRequest));
-
- // Request at least 1 method to revert!
- _ASSERTE ((cFunctions != 0) && (rgModuleIDs != NULL) && (rgMethodDefs != NULL));
-
- ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
- for (ULONG i = 0; i < cFunctions; i++)
- {
- HRESULT hr = E_UNEXPECTED;
- Module * pModule = reinterpret_cast< Module * >(rgModuleIDs[i]);
- if (pModule == NULL || TypeFromToken(rgMethodDefs[i]) != mdtMethodDef)
- {
- hr = E_INVALIDARG;
- }
- else if (pModule->IsBeingUnloaded())
- {
- hr = CORPROF_E_DATAINCOMPLETE;
- }
- else if (pModule->IsReflection())
- {
- hr = CORPROF_E_MODULE_IS_DYNAMIC;
- }
- else
- {
- hr = pModule->GetReJitManager()->RequestRevertByToken(pModule, rgMethodDefs[i]);
- }
-
- if (rgHrStatuses != NULL)
- {
- rgHrStatuses[i] = hr;
- }
- }
-
- ThreadSuspend::RestartEE(FALSE /* bFinishedGC */, TRUE /* SuspendSucceded */);
-
- return S_OK;
+ return UpdateActiveILVersions(cFunctions, rgModuleIDs, rgMethodDefs, rgHrStatuses, TRUE);
}
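Both entry points now funnel into UpdateActiveILVersions; the toy model below (all types are stand-ins, not the real ILCodeVersion) shows the one real difference between them, namely which IL version gets activated:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct ILCodeVersion { uint32_t id; }; // id 0 == the original IL

    ILCodeVersion BindNewILVersion() {
        static uint32_t s_nextId = 0;
        return ILCodeVersion{ ++s_nextId };
    }

    // Revert activates the original version; rejit binds a fresh one.
    void UpdateActiveILVersions(std::vector<ILCodeVersion>& toActivate, bool fIsRevert) {
        toActivate.push_back(fIsRevert ? ILCodeVersion{ 0 } : BindNewILVersion());
    }

    int main() {
        std::vector<ILCodeVersion> batch;
        UpdateActiveILVersions(batch, /*fIsRevert=*/false); // RequestReJIT path
        UpdateActiveILVersions(batch, /*fIsRevert=*/true);  // RequestRevert path
        for (auto& v : batch) std::cout << v.id << "\n";    // prints 1 then 0
        return 0;
    }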
-//---------------------------------------------------------------------------------------
-//
-// Called by AppDomain::Exit() to notify the SharedDomain's ReJitManager that this
-// AppDomain is exiting. The SharedDomain's ReJitManager will then remove any
-// ReJitInfos relating to MDs owned by AppDomain. This is how we remove
-// non-domain-neutral instantiations of domain-neutral generics from the SharedDomain's
-// ReJitManager.
-//
-// Arguments:
-// pAppDomain - AppDomain that is exiting.
-//
-
// static
-void ReJitManager::OnAppDomainExit(AppDomain * pAppDomain)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- // All ReJitInfos and SharedReJitInfos for this AD's ReJitManager automatically get
- // cleaned up as they're allocated on the AD's loader heap.
-
- // We explicitly clean up the SHash here, as its entries get allocated using regular
- // "new"
- pAppDomain->GetReJitManager()->m_table.RemoveAll();
-
- // We need to ensure that any MethodDescs from pAppDomain that are stored on the
- // SharedDomain's ReJitManager get removed from the SharedDomain's ReJitManager's
- // hash table, and from the linked lists tied to their owning SharedReJitInfo. (This
- // covers the case of non-domain-neutral instantiations of domain-neutral generics.)
- SharedDomain::GetDomain()->GetReJitManager()->RemoveReJitInfosFromDomain(pAppDomain);
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Small helper to determine whether a given (possibly instantiated generic) MethodDesc
-// is safe to rejit. If not, this function is responsible for calling into the
-// profiler's ReJITError()
-//
-// Arguments:
-// pMD - MethodDesc to test
-// Return Value:
-// S_OK iff pMD is safe to rejit
-// CORPROF_E_FUNCTION_IS_COLLECTIBLE - function can't be rejitted because it is collectible
-//
-
-// static
-HRESULT ReJitManager::IsMethodSafeForReJit(PTR_MethodDesc pMD)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- _ASSERTE(pMD != NULL);
-
- // Weird, non-user functions were already weeded out in RequestReJIT(), and will
- // also never be passed to us by the prestub worker (for the pre-rejit case).
- _ASSERTE(pMD->IsIL());
-
- // Any MethodDescs that could be collected are not currently supported. Although we
- // rule out all Ref.Emit modules in RequestReJIT(), there can still exist types defined
- // in a non-reflection module and instantiated into a collectible assembly
- // (e.g., List<MyCollectibleStruct>). In the future we may lift this
- // restriction by updating the ReJitManager when the collectible assemblies
- // owning the instantiations get collected.
- if (pMD->GetLoaderAllocator()->IsCollectible())
- {
- return CORPROF_E_FUNCTION_IS_COLLECTIBLE;
- }
-
- return S_OK;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Simple wrapper around GetCurrentReJitWorker. See
-// code:ReJitManager::GetCurrentReJitWorker for information about parameters, return
-// values, etc.
-
-// static
-DWORD ReJitManager::GetCurrentReJitFlags(PTR_MethodDesc pMD)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- PRECONDITION(CheckPointer(pMD));
- }
- CONTRACTL_END;
-
- return pMD->GetReJitManager()->GetCurrentReJitFlagsWorker(pMD);
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Given a methodDef token, finds the corresponding ReJitInfo, and asks the
-// ReJitInfo to perform a revert.
-//
-// Arguments:
-// * pModule - Module to revert
-// * methodDef - methodDef token to revert
-//
-// Return Value:
-// HRESULT indicating success or failure. If the method was never
-// rejitted in the first place, this method returns a special error code
-// (CORPROF_E_ACTIVE_REJIT_REQUEST_NOT_FOUND).
-// E_OUTOFMEMORY
-//
-
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable:4702) // Disable bogus unreachable code warning
-#endif // _MSC_VER
-HRESULT ReJitManager::RequestRevertByToken(PTR_Module pModule, mdMethodDef methodDef)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_PREEMPTIVE;
- }
- CONTRACTL_END;
-
- _ASSERTE(ThreadStore::HoldingThreadStore());
- CrstHolder ch(&m_crstTable);
-
- _ASSERTE(pModule != NULL);
- _ASSERTE(methodDef != mdTokenNil);
-
- ReJitInfo * pInfo = NULL;
- MethodDesc * pMD = NULL;
-
- pInfo = FindNonRevertedReJitInfo(pModule, methodDef);
- if (pInfo == NULL)
- {
- pMD = pModule->LookupMethodDef(methodDef);
- pInfo = FindNonRevertedReJitInfo(pMD);
- if (pInfo == NULL)
- return CORPROF_E_ACTIVE_REJIT_REQUEST_NOT_FOUND;
- }
-
- _ASSERTE (pInfo != NULL);
- _ASSERTE (pInfo->m_pShared != NULL);
- _ASSERTE (pInfo->m_pShared->GetState() != SharedReJitInfo::kStateReverted);
- ReJitManagerJumpStampBatch batch(this);
- HRESULT hr = Revert(pInfo->m_pShared, &batch);
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
- CDynArray<ReJitReportErrorWorkItem> errorRecords;
- hr = BatchUpdateJumpStamps(&(batch.undoMethods), &(batch.preStubMethods), &errorRecords);
- if (FAILED(hr))
- {
- _ASSERTE(hr == E_OUTOFMEMORY);
- return hr;
- }
-
- // If there were any errors, return the first one. This matches previous error handling
- // behavior that only returned the first error encountered within Revert().
- for (int i = 0; i < errorRecords.Count(); i++)
- {
- _ASSERTE(FAILED(errorRecords[i].hrStatus));
- return errorRecords[i].hrStatus;
- }
- return S_OK;
-}
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif // _MSC_VER
-
-
-
-//---------------------------------------------------------------------------------------
-//
-// Called by the prestub worker, this function decides if the MethodDesc needs to be
-// rejitted, and if so, this will call the profiler to get the rejit parameters (if they
-// are not yet stored), and then perform the actual re-JIT (by calling, indirectly,
-// UnsafeJitFunction).
-//
-// In order to allow the re-JIT to occur outside of any locks, the following sequence is
-// performed:
-//
-// * Enter this ReJitManager's table crst
-// * Find the single ReJitInfo (if any) in the table matching the input pMD. This
-// represents the outstanding rejit request against thie pMD
-// * If necessary, ask profiler for IL & codegen flags (by calling
-// GetReJITParameters()), thus transitioning the corresponding SharedReJitInfo
-// state kStateRequested-->kStateActive
-// * Exit this ReJitManager's table crst
-// * (following steps occur when DoReJitIfNecessary() calls DoReJit())
-// * Call profiler's ReJitCompilationStarted()
-// * Call UnsafeJitFunction with the IL / codegen flags provided by profiler, as stored
-// on the SharedReJitInfo. Note that if another Rejit request came in, then we would
-// create new SharedReJitInfo & ReJitInfo structures to track it, rather than
-// modifying the ReJitInfo / SharedReJitInfo we found above. So the ReJitInfo we're
-// using here (outside the lock), is "fixed" in the sense that its IL / codegen flags
-// will not change.
-// * (below is where we handle any races that might have occurred between threads
-// simultaneously rejitting this function)
-// * Enter this ReJitManager's table crst
-// * Check to see if another thread has already published the rejitted PCODE to
-// ReJitInfo::m_pCode. If so, bail.
-// * If we're the winner, publish our rejitted PCODE to ReJitInfo::m_pCode...
-// * ...and update the jump-stamp at the top of the originally JITted code so that it
-// now points to our rejitted code (instead of the prestub)
-// * Exit this ReJitManager's table crst
-// * Call profiler's ReJitCompilationFinished()
-// * Fire relevant ETW events
-//
-// Arguments:
-// pMD - MethodDesc to decide whether to rejit
-//
-// Return Value:
-// * If a rejit was performed, the PCODE of the generated code.
-// * If the ReJitManager changed its mind and chose not to do a rejit (e.g., a
-// revert request raced with this rejit request, and the revert won), just
-// return the PCODE of the originally JITted code (pMD->GetNativeCode())
-// * Else, NULL (which means the ReJitManager doesn't know or care about this
-// MethodDesc)
-//
-
-PCODE ReJitManager::DoReJitIfNecessaryWorker(PTR_MethodDesc pMD)
+HRESULT ReJitManager::ConfigureILCodeVersion(ILCodeVersion ilCodeVersion)
{
STANDARD_VM_CONTRACT;
- _ASSERTE(!IsTableCrstOwnedByCurrentThread());
+ CodeVersionManager* pCodeVersionManager = ilCodeVersion.GetModule()->GetCodeVersionManager();
+ _ASSERTE(!pCodeVersionManager->LockOwnedByCurrentThread());
- // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
- // of a lock to impact our caller (the prestub worker) as little as possible. If the
- // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
- if (m_table.GetCount() == 0)
- {
- return NULL;
- }
HRESULT hr = S_OK;
- ReJitInfo * pInfoToRejit = NULL;
- Module* pModule = NULL;
- mdMethodDef methodDef = mdTokenNil;
+ Module* pModule = ilCodeVersion.GetModule();
+ mdMethodDef methodDef = ilCodeVersion.GetMethodDef();
BOOL fNeedsParameters = FALSE;
BOOL fWaitForParameters = FALSE;
{
- // Serialize access to the rejit table. Though once we find the ReJitInfo we want,
- // exit the Crst so we can ReJIT the method without holding a lock.
- CrstHolder ch(&m_crstTable);
-
- ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD);
- ReJitInfoHash::KeyIterator end = GetEndIterator(pMD);
-
- if (iter == end)
- {
- // No rejit actions necessary
- return NULL;
- }
-
-
- for (; iter != end; iter++)
+ // Serialize access to the rejit state
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ switch (ilCodeVersion.GetRejitState())
{
- ReJitInfo * pInfo = *iter;
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- _ASSERTE(pInfo->m_pShared != NULL);
- SharedReJitInfo * pShared = pInfo->m_pShared;
-
- switch (pShared->GetState())
- {
- case SharedReJitInfo::kStateRequested:
- if (pInfo->GetState() == ReJitInfo::kJumpNone)
- {
- // We haven't actually suspended threads and jump-stamped the
- // method's prolog so just ignore this guy
- INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
- return NULL;
- }
- // When the SharedReJitInfo is still in the requested state, we haven't
- // gathered IL & codegen flags from the profiler yet. So, we can't be
- // pointing to rejitted code already. So we must be pointing to the prestub
- _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
-
- pInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
- pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
- pShared->m_dwInternalFlags |= SharedReJitInfo::kStateGettingReJITParameters;
- pInfoToRejit = pInfo;
- fNeedsParameters = TRUE;
- break;
-
- case SharedReJitInfo::kStateGettingReJITParameters:
- if (pInfo->GetState() == ReJitInfo::kJumpNone)
- {
- // We haven't actually suspended threads and jump-stamped the
- // method's prolog so just ignore this guy
- INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
- return NULL;
- }
- pInfoToRejit = pInfo;
- fWaitForParameters = TRUE;
- break;
-
- case SharedReJitInfo::kStateActive:
- INDEBUG(AssertRestOfEntriesAreReverted(iter, end));
- if (pInfo->GetState() == ReJitInfo::kJumpNone)
- {
- // We haven't actually suspended threads and jump-stamped the
- // method's prolog so just ignore this guy
- return NULL;
- }
- if (pInfo->GetState() == ReJitInfo::kJumpToRejittedCode)
- {
- // Looks like another thread has beat us in a race to rejit, so ignore.
- return NULL;
- }
-
- // Found a ReJitInfo to actually rejit.
- _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
- pInfoToRejit = pInfo;
- goto ExitLoop;
+ case ILCodeVersion::kStateRequested:
+ ilCodeVersion.SetRejitState(ILCodeVersion::kStateGettingReJITParameters);
+ fNeedsParameters = TRUE;
+ break;
- case SharedReJitInfo::kStateReverted:
- // just ignore this guy
- continue;
+ case ILCodeVersion::kStateGettingReJITParameters:
+ fWaitForParameters = TRUE;
+ break;
- default:
- UNREACHABLE();
- }
+ default:
+ return S_OK;
}
- ExitLoop:
- ;
- }
-
- if (pInfoToRejit == NULL)
- {
- // Didn't find the requested MD to rejit.
- return NULL;
}
if (fNeedsParameters)
@@ -2021,33 +820,39 @@ PCODE ReJitManager::DoReJitIfNecessaryWorker(PTR_MethodDesc pMD)
if (FAILED(hr))
{
{
- CrstHolder ch(&m_crstTable);
- if (pInfoToRejit->m_pShared->m_dwInternalFlags == SharedReJitInfo::kStateGettingReJITParameters)
+ // Historically on failure we would revert to the kRequested state and fall back
+ // to the initial code gen. The next time the method ran it would try again.
+ //
+ // Preserving that behavior is possible, but a bit awkward now that we have
+ // Precode swapping as well. Instead of doing that I am acting as if GetReJITParameters
+ // had succeeded, using the original IL, no jit flags, and no modified IL mapping.
+ // This is similar to a fallback except the profiler won't get any further attempts
+ // to provide the parameters correctly. If the profiler wants another attempt it would
+ // need to call RequestRejit again.
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ if (ilCodeVersion.GetRejitState() == ILCodeVersion::kStateGettingReJITParameters)
{
- pInfoToRejit->m_pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
- pInfoToRejit->m_pShared->m_dwInternalFlags |= SharedReJitInfo::kStateRequested;
+ ilCodeVersion.SetRejitState(ILCodeVersion::kStateActive);
+ ilCodeVersion.SetIL(ILCodeVersion(pModule, methodDef).GetIL());
}
}
- ReportReJITError(pModule, methodDef, pMD, hr);
- return NULL;
+ ReportReJITError(pModule, methodDef, pModule->LookupMethodDef(methodDef), hr);
+ return S_OK;
}
-
+ else
{
- CrstHolder ch(&m_crstTable);
- if (pInfoToRejit->m_pShared->m_dwInternalFlags == SharedReJitInfo::kStateGettingReJITParameters)
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ if (ilCodeVersion.GetRejitState() == ILCodeVersion::kStateGettingReJITParameters)
{
// Inside the above call to ICorProfilerCallback4::GetReJITParameters, the profiler
// will have used the specified pFuncControl to provide its IL and codegen flags.
-// So now we transfer it out to the SharedReJitInfo.
+// So now we transfer it out to the ILCodeVersion.
- pInfoToRejit->m_pShared->m_dwCodegenFlags = pFuncControl->GetCodegenFlags();
- pInfoToRejit->m_pShared->m_pbIL = pFuncControl->GetIL();
- // pShared is now the owner of the memory for the IL buffer
- pInfoToRejit->m_pShared->m_instrumentedILMap.SetMappingInfo(pFuncControl->GetInstrumentedMapEntryCount(),
+ ilCodeVersion.SetJitFlags(pFuncControl->GetCodegenFlags());
+ ilCodeVersion.SetIL((COR_ILMETHOD*)pFuncControl->GetIL());
+ // ilCodeVersion is now the owner of the memory for the IL buffer
+ ilCodeVersion.SetInstrumentedILMap(pFuncControl->GetInstrumentedMapEntryCount(),
pFuncControl->GetInstrumentedMapEntries());
- pInfoToRejit->m_pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
- pInfoToRejit->m_pShared->m_dwInternalFlags |= SharedReJitInfo::kStateActive;
- _ASSERTE(pInfoToRejit->m_pCode == NULL);
- _ASSERTE(pInfoToRejit->GetState() == ReJitInfo::kJumpToPrestub);
+ ilCodeVersion.SetRejitState(ILCodeVersion::kStateActive);
}
}
}
@@ -2077,568 +882,22 @@ PCODE ReJitManager::DoReJitIfNecessaryWorker(PTR_MethodDesc pMD)
while (true)
{
{
- CrstHolder ch(&m_crstTable);
- if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateActive)
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ if (ilCodeVersion.GetRejitState() == ILCodeVersion::kStateActive)
{
break; // the other thread got the parameters successfully, go race to rejit
}
- else if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateRequested)
- {
- return NULL; // the other thread had an error getting parameters and went
- // back to requested
- }
- else if (pInfoToRejit->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
- {
- break; // we got reverted, enter DoReJit anyways and it will detect this and
- // bail out.
- }
}
ClrSleepEx(1, FALSE);
}
}
-
- // We've got the info from the profiler, so JIT the method. This is also
- // responsible for updating the jump target from the prestub to the newly
- // rejitted code AND for publishing the top of the newly rejitted code to
- // pInfoToRejit->m_pCode. If two threads race to rejit, DoReJit handles the
- // race, and ensures the winner publishes his result to pInfoToRejit->m_pCode.
- return DoReJit(pInfoToRejit);
-
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Called by DoReJitIfNecessaryWorker(), this function assumes the IL & codegen flags have
-// already been gathered from the profiler, and then calls UnsafeJitFunction to perform
-// the re-JIT (bracketing that with profiler callbacks to announce the start/finish of
-// the rejit).
-//
-// This is also responsible for handling any races between multiple threads
-// simultaneously rejitting a function. See the comment at the top of
-// code:ReJitManager::DoReJitIfNecessaryWorker for details.
-//
-// Arguments:
-// pInfo - ReJitInfo tracking this MethodDesc's rejit request
-//
-// Return Value:
-// * Generally, return the PCODE of the start of the rejitted code. However,
-// depending on the result of races determined by DoReJit(), the return value
-// can be different:
-// * If the current thread races with another thread to do the rejit, return the
-// PCODE generated by the winner.
-// * If the current thread races with another thread doing a revert, and the revert
-// wins, then return the PCODE of the start of the originally JITted code
-// (i.e., pInfo->GetMethodDesc()->GetNativeCode())
-//
-
-PCODE ReJitManager::DoReJit(ReJitInfo * pInfo)
-{
- STANDARD_VM_CONTRACT;
-
-#ifdef PROFILING_SUPPORTED
-
- INDEBUG(Dump("Inside DoRejit(). Dumping this ReJitManager\n"));
-
- _ASSERTE(!pInfo->GetMethodDesc()->IsNoMetadata());
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- g_profControlBlock.pProfInterface->ReJITCompilationStarted((FunctionID)pInfo->GetMethodDesc(),
- pInfo->m_pShared->GetId(),
- TRUE);
- END_PIN_PROFILER();
- }
-
- COR_ILMETHOD_DECODER ILHeader(pInfo->GetIL(), pInfo->GetMethodDesc()->GetMDImport(), NULL);
- PCODE pCodeOfRejittedCode = NULL;
-
- // Note that we're intentionally not enclosing UnsafeJitFunction in a try block
- // to swallow exceptions. It's expected that any exception thrown is fatal and
- // should pass through. This is in contrast to MethodDesc::MakeJitWorker, which
- // does enclose UnsafeJitFunction in a try block, and attempts to swallow an
- // exception that occurs on the current thread when another thread has
- // simultaneously attempted (and provably succeeded in) the JITting of the same
- // function. This is a very unusual case (likely due to an out of memory error
- // encountered on the current thread and not on the competing thread), which is
- // not worth attempting to cover.
- pCodeOfRejittedCode = UnsafeJitFunction(
- pInfo->GetMethodDesc(),
- &ILHeader,
- JitFlagsFromProfCodegenFlags(pInfo->m_pShared->m_dwCodegenFlags));
-
- _ASSERTE(pCodeOfRejittedCode != NULL);
-
- // This atomically updates the jmp target (from prestub to top of rejitted code) and publishes
- // the top of rejitted code into pInfo, all inside the same acquisition of this
- // ReJitManager's table Crst.
- HRESULT hr = S_OK;
- BOOL fEESuspended = FALSE;
- BOOL fNotify = FALSE;
- PCODE ret = NULL;
- while (true)
- {
- if (fEESuspended)
- {
- ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
- }
- CrstHolder ch(&m_crstTable);
-
- // Now that we're under the lock, recheck whether pInfo->m_pCode has been filled
- // in...
- if (pInfo->m_pCode != NULL)
- {
- // Yup, another thread rejitted this request at the same time as us, and beat
- // us to publishing the result. Intentionally skip the rest of this, and do
- // not issue a ReJITCompilationFinished from this thread.
- ret = pInfo->m_pCode;
- break;
- }
-
- // BUGBUG: This revert check below appears to introduce behavior we probably don't want.
- // This is a pre-existing issue and I don't have time to create a test for this right now,
- // but wanted to capture the issue in a comment for future work.
- // Imagine the profiler has one thread which is calling RequestReJIT periodically
- // updating the method's IL:
- // 1) RequestReJit (table lock keeps these atomic)
- // 1.1) Revert old shared rejit info
- // 1.2) Create new shared rejit info
- // 2) RequestReJit (table lock keeps these atomic)
- // 2.1) Revert old shared rejit info
- // 2.2) Create new shared rejit info
- // ...
- // On a second thread we keep calling the method which needs to periodically rejit
- // to update to the newest version:
- // a) [DoReJitIfNecessaryWorker] detects active rejit request
- // b) [DoReJit] if shared rejit info is reverted, execute original method code.
- //
- // Because (a) and (b) are not under the same lock acquisition this ordering is possible:
- // (1), (a), (2), (b)
- // The result is that (b) sees the shared rejit is reverted and the method executes its
- // original code. As a profiler using rejit I would expect either the IL specified in
- // (1) or the IL specified in (2) would be used, but never the original IL.
- //
- // I think the correct behavior is to bind a method execution to the current rejit
- // version at some point, and from then on we guarantee to execute that version of the
- // code, regardless of reverts or re-rejit request.
- //
- // There is also a related issue with GetCurrentReJitFlagsWorker which assumes jitting
- // always corresponds to the most recent version of the method. If we start pinning
- // method invocations to particular versions then that method can't be allowed to
- // float forward to the newest version, nor can it abort if the most recent version
- // is reverted.
- // END BUGBUG
- //
- // And recheck whether some other thread tried to revert this method in the
- // meantime (this check would also include an attempt to re-rejit the method
- // (i.e., calling RequestReJIT on the method multiple times), which would revert
- // this pInfo before creating a new one to track the latest rejit request).
- if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
- {
- // Yes, we've been reverted, so the jmp-to-prestub has already been removed,
- // and we should certainly not attempt to redirect that nonexistent jmp to
- // the code we just rejitted
- _ASSERTE(pInfo->GetMethodDesc()->GetNativeCode() != NULL);
- ret = pInfo->GetMethodDesc()->GetNativeCode();
- break;
- }
-
-#ifdef DEBUGGING_SUPPORTED
- // Notify the debugger of the rejitted function, so it can generate
- // DebuggerMethodInfo / DebugJitInfo for it. Normally this is done inside
- // UnsafeJitFunction (via CallCompileMethodWithSEHWrapper), but it skips this
- // when it detects the MethodDesc was already jitted. Since we know here that
- // we're rejitting it (and this is not just some sort of multi-thread JIT race),
- // now is a good place to notify the debugger.
- if (g_pDebugInterface != NULL)
- {
- g_pDebugInterface->JITComplete(pInfo->GetMethodDesc(), pCodeOfRejittedCode);
- }
-
-#endif // DEBUGGING_SUPPORTED
-
- _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive);
- _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToPrestub);
-
- // Atomically publish the PCODE and update the jmp stamp (to go to the rejitted
- // code) under the lock
- hr = pInfo->UpdateJumpTarget(fEESuspended, pCodeOfRejittedCode);
- if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
- {
- _ASSERTE(!fEESuspended);
- fEESuspended = TRUE;
- continue;
- }
- if (FAILED(hr))
- {
- break;
- }
- pInfo->m_pCode = pCodeOfRejittedCode;
- fNotify = TRUE;
- ret = pCodeOfRejittedCode;
-
- _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive);
- _ASSERTE(pInfo->GetState() == ReJitInfo::kJumpToRejittedCode);
- break;
- }
-
- if (fEESuspended)
- {
- ThreadSuspend::RestartEE(FALSE /* bFinishedGC */, TRUE /* SuspendSucceded */);
- fEESuspended = FALSE;
- }
-
- if (FAILED(hr))
- {
- Module* pModule = NULL;
- mdMethodDef methodDef = mdTokenNil;
- pInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
- ReportReJITError(pModule, methodDef, pInfo->GetMethodDesc(), hr);
- }
-
- // Notify the profiler that JIT completed.
- if (fNotify)
- {
- BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
- g_profControlBlock.pProfInterface->ReJITCompilationFinished((FunctionID)pInfo->GetMethodDesc(),
- pInfo->m_pShared->GetId(),
- S_OK,
- TRUE);
- END_PIN_PROFILER();
- }
-#endif // PROFILING_SUPPORTED
-
- // Fire relevant ETW events
- if (fNotify)
- {
- ETW::MethodLog::MethodJitted(
- pInfo->GetMethodDesc(),
- NULL, // namespaceOrClassName
- NULL, // methodName
- NULL, // methodSignature
- pCodeOfRejittedCode,
- pInfo->m_pShared->GetId());
- }
- return ret;
-}
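The removed DoReJit above centers on a publish-under-lock race: recheck m_pCode, recheck for a revert, then publish and redirect the jump stamp. A minimal standalone sketch of that shape, using std::mutex in place of the table Crst; VersionRecord, PublishRejittedCode, and g_tableLock are illustrative stand-ins, not CoreCLR APIs:

#include <cstdint>
#include <mutex>

using PCODE = std::uintptr_t;

struct VersionRecord
{
    PCODE publishedCode = 0;   // plays the role of ReJitInfo::m_pCode
    bool  reverted      = false;
};

std::mutex g_tableLock;        // stands in for the ReJitManager table Crst

// Returns the code every caller should run: our freshly jitted code if we
// win the publish race, whatever a racing thread published first, or the
// original code if a revert raced us and won.
PCODE PublishRejittedCode(VersionRecord& rec, PCODE freshlyJitted, PCODE originalCode)
{
    std::lock_guard<std::mutex> hold(g_tableLock);
    if (rec.publishedCode != 0)
        return rec.publishedCode;      // another thread already published
    if (rec.reverted)
        return originalCode;           // revert won; do not redirect the jmp
    rec.publishedCode = freshlyJitted; // we win: publish under the lock
    return freshlyJitted;
}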
-
-
-//---------------------------------------------------------------------------------------
-//
-// Transition SharedReJitInfo to Reverted state and add all associated ReJitInfos to the
-// undo list in the method batch
-//
-// Arguments:
-// pShared - SharedReJitInfo to revert
-// pJumpStampBatch - a batch of methods that need their jump stamps reverted. This method
-// is responsible for adding additional ReJitInfos to the list.
-//
-// Return Value:
-// S_OK if all MDs are batched and the SharedReJitInfo is marked reverted
-// E_OUTOFMEMORY (MDs couldn't be added to batch, SharedReJitInfo is not reverted)
-//
-// Assumptions:
-// Caller must be holding this ReJitManager's table crst.
-//
-
-HRESULT ReJitManager::Revert(SharedReJitInfo * pShared, ReJitManagerJumpStampBatch* pJumpStampBatch)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
- _ASSERTE((pShared->GetState() == SharedReJitInfo::kStateRequested) ||
- (pShared->GetState() == SharedReJitInfo::kStateGettingReJITParameters) ||
- (pShared->GetState() == SharedReJitInfo::kStateActive));
- _ASSERTE(pShared->GetMethods() != NULL);
- _ASSERTE(pJumpStampBatch->pReJitManager == this);
-
- HRESULT hrReturn = S_OK;
- for (ReJitInfo * pInfo = pShared->GetMethods(); pInfo != NULL; pInfo = pInfo->m_pNext)
- {
- if (pInfo->GetState() == ReJitInfo::kJumpNone)
- {
- // Nothing to revert for this MethodDesc / instantiation.
- continue;
- }
-
- ReJitInfo** ppInfo = pJumpStampBatch->undoMethods.Append();
- if (ppInfo == NULL)
- {
- return E_OUTOFMEMORY;
- }
- *ppInfo = pInfo;
- }
-
- pShared->m_dwInternalFlags &= ~SharedReJitInfo::kStateMask;
- pShared->m_dwInternalFlags |= SharedReJitInfo::kStateReverted;
+
return S_OK;
}
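A compact model of the rejit-state transitions ConfigureILCodeVersion drives (kStateRequested -> kStateGettingReJITParameters -> kStateActive), including the sleep-poll taken by a thread that loses the race to fetch parameters. This sketch uses an atomic CAS where the runtime uses its table lock, and the enum and function names are stand-ins, not the real ILCodeVersion API:

#include <atomic>
#include <chrono>
#include <thread>

enum class RejitState { Requested, GettingParameters, Active };

// One thread wins the Requested -> GettingParameters transition, calls out
// to the profiler, then publishes Active; every other thread that sees
// GettingParameters polls until the winner finishes, mirroring the
// ClrSleepEx(1, FALSE) loop above.
void ConfigureVersion(std::atomic<RejitState>& state)
{
    RejitState expected = RejitState::Requested;
    if (state.compare_exchange_strong(expected, RejitState::GettingParameters))
    {
        // ... ask the profiler for IL and codegen flags here ...
        state.store(RejitState::Active);
        return;
    }
    while (state.load() != RejitState::Active)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
}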
-
-//---------------------------------------------------------------------------------------
-//
-// Removes any ReJitInfos relating to MDs for the specified AppDomain from this
-// ReJitManager. This is used to remove non-domain-neutral instantiations of
-// domain-neutral generics from the SharedDomain's ReJitManager, when the AppDomain
-// containing those non-domain-neutral instantiations is unloaded.
-//
-// Arguments:
-// * pAppDomain - AppDomain that is exiting, and is thus the one for which we should
-// find ReJitInfos to remove
-//
-//
-
-void ReJitManager::RemoveReJitInfosFromDomain(AppDomain * pAppDomain)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- CAN_TAKE_LOCK;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- CrstHolder ch(&m_crstTable);
-
- INDEBUG(Dump("Dumping SharedDomain rejit manager BEFORE AD Unload"));
-
- for (ReJitInfoHash::Iterator iterCur = m_table.Begin(), iterEnd = m_table.End();
- iterCur != iterEnd;
- iterCur++)
- {
- ReJitInfo * pInfo = *iterCur;
-
- if (pInfo->m_key.m_keyType != ReJitInfo::Key::kMethodDesc)
- {
- // Skip all "placeholder" ReJitInfos--they'll always be allocated on a
- // loader heap for the shared domain.
- _ASSERTE(pInfo->m_key.m_keyType == ReJitInfo::Key::kMetadataToken);
- _ASSERTE(PTR_Module(pInfo->m_key.m_pModule)->GetDomain()->IsSharedDomain());
- continue;
- }
-
- if (pInfo->GetMethodDesc()->GetDomain() != pAppDomain)
- {
- // We only care about non-domain-neutral instantiations that live in
- // pAppDomain.
- continue;
- }
-
- // Remove this ReJitInfo from the linked-list of ReJitInfos associated with its
- // SharedReJitInfo.
- pInfo->m_pShared->RemoveMethod(pInfo);
-
- // Remove this ReJitInfo from the ReJitManager's hash table.
- m_table.Remove(iterCur);
-
- // pInfo is not deallocated yet. That will happen when pAppDomain finishes
- // unloading and its loader heaps get freed.
- }
- INDEBUG(Dump("Dumping SharedDomain rejit manager AFTER AD Unload"));
-}
-
#endif // DACCESS_COMPILE
// The rest of the ReJitManager methods are safe to compile for DAC
-
-//---------------------------------------------------------------------------------------
-//
-// Helper to iterate through m_table, finding the single matching non-reverted ReJitInfo.
-// The caller may search either by MethodDesc * XOR by (Module *, methodDef) pair.
-//
-// Arguments:
-// * pMD - MethodDesc * to search for. (NULL if caller is searching by (Module *,
-// methodDef)
-// * pModule - Module * to search for. (NULL if caller is searching by MethodDesc *)
-// * methodDef - methodDef to search for. (NULL if caller is searching by MethodDesc
-// *)
-//
-// Return Value:
-// ReJitInfo * requested, or NULL if none is found
-//
-// Assumptions:
-// Caller should be holding this ReJitManager's table crst.
-//
-
-PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfoHelper(
- PTR_MethodDesc pMD,
- PTR_Module pModule,
- mdMethodDef methodDef)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- INSTANCE_CHECK;
- }
- CONTRACTL_END;
-
- // Either pMD is valid, xor (pModule,methodDef) is valid
- _ASSERTE(
- ((pMD != NULL) && (pModule == NULL) && (methodDef == mdTokenNil)) ||
- ((pMD == NULL) && (pModule != NULL) && (methodDef != mdTokenNil)));
-
- // Caller should hold the Crst around calling this function and using the ReJitInfo.
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
-
- ReJitInfoHash::KeyIterator beginIter(&m_table, TRUE /* begin */);
- ReJitInfoHash::KeyIterator endIter(&m_table, FALSE /* begin */);
-
- if (pMD != NULL)
- {
- beginIter = GetBeginIterator(pMD);
- endIter = GetEndIterator(pMD);
- }
- else
- {
- beginIter = GetBeginIterator(pModule, methodDef);
- endIter = GetEndIterator(pModule, methodDef);
- }
-
- for (ReJitInfoHash::KeyIterator iter = beginIter;
- iter != endIter;
- iter++)
- {
- PTR_ReJitInfo pInfo = *iter;
- _ASSERTE(pInfo->m_pShared != NULL);
-
- if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
- continue;
-
- INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
- return pInfo;
- }
-
- return NULL;
-}
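The helper above encodes a simple invariant: per method, at most one entry is live and everything else is reverted. A standalone sketch of that scan, with illustrative types rather than the removed ReJitInfoHash machinery:

#include <vector>

enum class VersionState { Requested, GettingParameters, Active, Reverted };
struct VersionEntry { VersionState state; };

// Scan all entries for one method and return the single non-reverted entry,
// matching the invariant that at most one live version exists per method.
const VersionEntry* FindNonReverted(const std::vector<VersionEntry>& entries)
{
    for (const VersionEntry& e : entries)
    {
        if (e.state == VersionState::Reverted)
            continue;   // reverted entries are dead history; skip them
        return &e;      // the real helper asserts the remainder are reverted
    }
    return nullptr;
}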
-
-
-//---------------------------------------------------------------------------------------
-//
-// ReJitManager instance constructor--for now, does nothing
-//
-
-ReJitManager::ReJitManager()
-{
- LIMITED_METHOD_DAC_CONTRACT;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Called from BaseDomain::BaseDomain to do any constructor-time initialization.
-// Presently, this takes care of initializing the Crst, choosing the type based on
-// whether this ReJitManager belongs to the SharedDomain.
-//
-// Arguments:
-// * fSharedDomain - nonzero iff this ReJitManager belongs to the SharedDomain.
-//
-
-void ReJitManager::PreInit(BOOL fSharedDomain)
-{
- CONTRACTL
- {
- THROWS;
- GC_TRIGGERS;
- CAN_TAKE_LOCK;
- MODE_ANY;
- }
- CONTRACTL_END;
-
-#ifndef DACCESS_COMPILE
- m_crstTable.Init(
- fSharedDomain ? CrstReJITSharedDomainTable : CrstReJITDomainTable,
- CrstFlags(CRST_UNSAFE_ANYMODE | CRST_DEBUGGER_THREAD | CRST_REENTRANCY | CRST_TAKEN_DURING_SHUTDOWN));
-#endif // DACCESS_COMPILE
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Finds the ReJitInfo tracking a pre-rejit request.
-//
-// Arguments:
-// * beginIter - Iterator to start search
-// * endIter - Iterator to end search
-//
-// Return Value:
-// NULL if no such ReJitInfo exists. This can occur if two threads race
-// to JIT the original code and we're the loser. Else, the ReJitInfo * found.
-//
-// Assumptions:
-// Caller must be holding this ReJitManager's table lock.
-//
-
-ReJitInfo * ReJitManager::FindPreReJittedReJitInfo(
- ReJitInfoHash::KeyIterator beginIter,
- ReJitInfoHash::KeyIterator endIter)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- // Caller shouldn't be handing out iterators unless he's already locking the table.
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
-
- for (ReJitInfoHash::KeyIterator iter = beginIter;
- iter != endIter;
- iter++)
- {
- ReJitInfo * pInfo = *iter;
- SharedReJitInfo * pShared = pInfo->m_pShared;
- _ASSERTE(pShared != NULL);
-
- switch (pShared->GetState())
- {
- case SharedReJitInfo::kStateRequested:
- case SharedReJitInfo::kStateGettingReJITParameters:
- case SharedReJitInfo::kStateActive:
- if (pInfo->GetState() == ReJitInfo::kJumpToRejittedCode)
- {
- // There was a race for the original JIT, and we're the loser. (The winner
- // has already published the original JIT's pcode, jump-stamped, and begun
- // the rejit!)
- return NULL;
- }
-
- // Otherwise, either we have a rejit request that has not yet been
- // jump-stamped, or there was a race for the original JIT, and another
- // thread jump-stamped its copy of the originally JITted code already. In
- // that case, we still don't know who the winner or loser will be (PCODE may
- // not yet be published), so we'll have to jump-stamp our copy just in case
- // we win.
- _ASSERTE((pInfo->GetState() == ReJitInfo::kJumpNone) ||
- (pInfo->GetState() == ReJitInfo::kJumpToPrestub));
- INDEBUG(AssertRestOfEntriesAreReverted(iter, endIter));
- return pInfo;
-
-
- case SharedReJitInfo::kStateReverted:
- // just ignore this guy
- continue;
-
- default:
- UNREACHABLE();
- }
- }
-
- return NULL;
-}
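The pre-rejit race logic above reduces to one question about the jump-stamp state. A tiny sketch of that decision, with an illustrative enum in place of the removed ReJitInfo states:

enum class JumpState { None, ToPrestub, ToRejittedCode };

// If another thread already redirected the prolog to rejitted code, we lost
// the original-JIT race and there is nothing to stamp; otherwise the entry
// still needs (or already carries) a jump stamp and is the one to hand back.
inline bool StillNeedsJumpStamp(JumpState s)
{
    return s == JumpState::None || s == JumpState::ToPrestub;
}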
-
//---------------------------------------------------------------------------------------
//
// Used by profiler to get the ReJITID corresponding to a (MethodDesc *, PCODE) pair.
@@ -2654,7 +913,7 @@ ReJitInfo * ReJitManager::FindPreReJittedReJitInfo(
// 0 if no such ReJITID found (e.g., PCODE is from a JIT and not a rejit), else the
// ReJITID requested.
//
-
+// static
ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
{
CONTRACTL
@@ -2662,7 +921,6 @@ ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
NOTHROW;
CAN_TAKE_LOCK;
GC_TRIGGERS;
- INSTANCE_CHECK;
PRECONDITION(CheckPointer(pMD));
PRECONDITION(pCodeStart != NULL);
}
@@ -2671,14 +929,14 @@ ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
// Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
-// of a lock to impact our caller (the prestub worker) as little as possible. If the
+// of a lock to impact our caller as little as possible. If the
// map is nonempty, we'll acquire the lock at that point and do the lookup for real.
- if (m_table.GetCount() == 0)
+ CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager();
+ if (pCodeVersionManager->GetNonDefaultILVersionCount() == 0)
{
return 0;
}
- CrstHolder ch(&m_crstTable);
-
- return GetReJitIdNoLock(pMD, pCodeStart);
+ CodeVersionManager::TableLockHolder ch(pCodeVersionManager);
+ return ReJitManager::GetReJitIdNoLock(pMD, pCodeStart);
}
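GetReJitId's fast path checks a cheap count before ever touching the lock. A standalone sketch of that check-then-lock pattern; the map, counter, and names here are illustrative, not the CodeVersionManager API:

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <map>
#include <mutex>

std::mutex g_versionLock;
std::map<std::uintptr_t, std::uint64_t> g_codeToId; // stands in for the version table
std::atomic<std::size_t> g_nonDefaultCount{0};      // cheap, lock-free emptiness probe

// Fast path: if no non-default versions exist, answer 0 without locking, so
// the overwhelmingly common case never contends on the table lock.
std::uint64_t GetRejitId(std::uintptr_t codeStart)
{
    if (g_nonDefaultCount.load(std::memory_order_relaxed) == 0)
        return 0;
    std::lock_guard<std::mutex> hold(g_versionLock);
    auto it = g_codeToId.find(codeStart);
    return (it == g_codeToId.end()) ? 0 : it->second;
}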
//---------------------------------------------------------------------------------------
@@ -2699,221 +957,21 @@ ReJITID ReJitManager::GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart)
NOTHROW;
CANNOT_TAKE_LOCK;
GC_NOTRIGGER;
- INSTANCE_CHECK;
PRECONDITION(CheckPointer(pMD));
PRECONDITION(pCodeStart != NULL);
}
CONTRACTL_END;
// Caller must ensure this lock is taken!
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
+ CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager();
+ _ASSERTE(pCodeVersionManager->LockOwnedByCurrentThread());
- ReJitInfo * pInfo = FindReJitInfo(pMD, pCodeStart, 0);
- if (pInfo == NULL)
+ NativeCodeVersion nativeCodeVersion = pCodeVersionManager->GetNativeCodeVersion(pMD, pCodeStart);
+ if (nativeCodeVersion.IsNull())
{
return 0;
}
-
- _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
- pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
- return pInfo->m_pShared->GetId();
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Used by profilers to map a (MethodDesc *, ReJITID) pair to the corresponding PCODE for
-// that rejit attempt. This can also be used for reverted methods, as the PCODE may still
-// be available and in use even after a rejitted function has been reverted.
-//
-// Arguments:
-// * pMD - MethodDesc * of interest
-// * reJitId - ReJITID of interest
-//
-// Return Value:
-// Corresponding PCODE of the rejit attempt, or NULL if no such rejit attempt can be
-// found.
-//
-
-PCODE ReJitManager::GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId)
-{
- CONTRACTL
- {
- NOTHROW;
- CAN_TAKE_LOCK;
- GC_NOTRIGGER;
- INSTANCE_CHECK;
- PRECONDITION(CheckPointer(pMD));
- PRECONDITION(reJitId != 0);
- }
- CONTRACTL_END;
-
- // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
- // of a lock to impact our caller (the prestub worker) as little as possible. If the
- // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
- if (m_table.GetCount() == 0)
- {
- return NULL;
- }
-
- CrstHolder ch(&m_crstTable);
-
- ReJitInfo * pInfo = FindReJitInfo(pMD, NULL, reJitId);
- if (pInfo == NULL)
- {
- return NULL;
- }
-
- _ASSERTE(pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
- pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
-
- return pInfo->m_pCode;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// If a function has been requested to be rejitted, finds the one current
-// SharedReJitInfo (ignoring all that are in the reverted state) and returns the codegen
-// flags recorded on it (which were thus used to rejit the MD). CEEInfo::canInline() calls
-// this as part of its calculation of whether it may inline a given method. (Profilers
-// may specify on a per-rejit-request basis whether the rejit of a method may inline
-// callees.)
-//
-// Arguments:
-// * pMD - MethodDesc * of interest.
-//
-// Return Value:
-// Returns the requested codegen flags, or 0 (i.e., no flags set) if no rejit attempt
-// can be found for the MD.
-//
-
-DWORD ReJitManager::GetCurrentReJitFlagsWorker(PTR_MethodDesc pMD)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- PRECONDITION(CheckPointer(pMD));
- }
- CONTRACTL_END;
-
- // Fast-path: If the rejit map is empty, no need to look up anything. Do this outside
- // of a lock to impact our caller (e.g., the JIT asking if it can inline) as little as possible. If the
- // map is nonempty, we'll acquire the lock at that point and do the lookup for real.
- if (m_table.GetCount() == 0)
- {
- return 0;
- }
-
- CrstHolder ch(&m_crstTable);
-
- for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
- iter != end;
- iter++)
- {
- ReJitInfo * pInfo = *iter;
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- _ASSERTE(pInfo->m_pShared != NULL);
-
- DWORD dwState = pInfo->m_pShared->GetState();
-
- if (dwState != SharedReJitInfo::kStateActive)
- {
- // Not active means we never asked profiler for the codegen flags OR the
- // rejit request has been reverted. So this one is useless.
- continue;
- }
-
- // Found it!
-#ifdef _DEBUG
- // This must be the only such ReJitInfo for this MethodDesc. Check the rest and
- // assert otherwise.
- {
- ReJitInfoHash::KeyIterator iterTest = iter;
- iterTest++;
-
- while(iterTest != end)
- {
- ReJitInfo * pInfoTest = *iterTest;
- _ASSERTE(pInfoTest->GetMethodDesc() == pMD);
- _ASSERTE(pInfoTest->m_pShared != NULL);
-
- DWORD dwStateTest = pInfoTest->m_pShared->GetState();
-
- if (dwStateTest == SharedReJitInfo::kStateActive)
- {
- _ASSERTE(!"Multiple active ReJitInfos for same MethodDesc");
- break;
- }
- iterTest++;
- }
- }
-#endif //_DEBUG
- return pInfo->m_pShared->m_dwCodegenFlags;
- }
-
- return 0;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Helper to find the matching ReJitInfo by methoddesc paired with either pCodeStart or
-// reJitId (exactly one should be non-zero, and will be used as the key for the lookup)
-//
-// Arguments:
-// * pMD - MethodDesc * to look up
-// * pCodeStart - PCODE of the particular rejit attempt to look up. NULL if looking
-// up by ReJITID.
-// * reJitId - ReJITID of the particular rejit attempt to look up. NULL if looking
-// up by PCODE.
-//
-// Return Value:
-// ReJitInfo * matching input parameters, or NULL if no such ReJitInfo could be
-// found.
-//
-// Assumptions:
-// Caller must be holding this ReJitManager's table lock.
-//
-
-PTR_ReJitInfo ReJitManager::FindReJitInfo(PTR_MethodDesc pMD, PCODE pCodeStart, ReJITID reJitId)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- INSTANCE_CHECK;
- PRECONDITION(CheckPointer(pMD));
- }
- CONTRACTL_END;
-
- // Caller should hold the Crst around calling this function and using the ReJitInfo.
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
-
- // One of these two keys should be used, but not both!
- _ASSERTE(
- ((pCodeStart != NULL) || (reJitId != 0)) &&
- !((pCodeStart != NULL) && (reJitId != 0)));
-
- for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
- iter != end;
- iter++)
- {
- PTR_ReJitInfo pInfo = *iter;
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- _ASSERTE(pInfo->m_pShared != NULL);
-
- if ((pCodeStart != NULL && pInfo->m_pCode == pCodeStart) || // pCodeStart is key
- (reJitId != 0 && pInfo->m_pShared->GetId() == reJitId)) // reJitId is key
- {
- return pInfo;
- }
- }
-
- return NULL;
+ return nativeCodeVersion.GetILCodeVersion().GetVersionId();
}
//---------------------------------------------------------------------------------------
@@ -2934,7 +992,7 @@ PTR_ReJitInfo ReJitManager::FindReJitInfo(PTR_MethodDesc pMD, PCODE pCodeStart,
// cReJitIds were returned and cReJitIds < *pcReJitIds (latter being the total
// number of ReJITIDs available).
//
-
+// static
HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[])
{
CONTRACTL
@@ -2942,31 +1000,29 @@ HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * p
NOTHROW;
CAN_TAKE_LOCK;
GC_NOTRIGGER;
- INSTANCE_CHECK;
PRECONDITION(CheckPointer(pMD));
PRECONDITION(pcReJitIds != NULL);
PRECONDITION(reJitIds != NULL);
}
CONTRACTL_END;
- CrstHolder ch(&m_crstTable);
+ CodeVersionManager* pCodeVersionManager = pMD->GetCodeVersionManager();
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
ULONG cnt = 0;
- for (ReJitInfoHash::KeyIterator iter = GetBeginIterator(pMD), end = GetEndIterator(pMD);
+ ILCodeVersionCollection ilCodeVersions = pCodeVersionManager->GetILCodeVersions(pMD);
+ for (ILCodeVersionIterator iter = ilCodeVersions.Begin(), end = ilCodeVersions.End();
iter != end;
iter++)
{
- ReJitInfo * pInfo = *iter;
- _ASSERTE(pInfo->GetMethodDesc() == pMD);
- _ASSERTE(pInfo->m_pShared != NULL);
+ ILCodeVersion curILVersion = *iter;
- if (pInfo->m_pShared->GetState() == SharedReJitInfo::kStateActive ||
- pInfo->m_pShared->GetState() == SharedReJitInfo::kStateReverted)
+ if (curILVersion.GetRejitState() == ILCodeVersion::kStateActive)
{
if (cnt < cReJitIds)
{
- reJitIds[cnt] = pInfo->m_pShared->GetId();
+ reJitIds[cnt] = curILVersion.GetVersionId();
}
++cnt;
@@ -2979,975 +1035,7 @@ HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * p
return (cnt > cReJitIds) ? S_FALSE : S_OK;
}
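GetReJITIDs follows the common COM fill-a-caller-buffer contract: report the total count, copy at most the requested number of ids, and return S_FALSE when the buffer was too small. A minimal sketch of the same contract with a bool standing in for S_OK/S_FALSE:

#include <cstdint>
#include <vector>

using ReJITID = std::uint64_t;

// Always report the total count, copy at most 'capacity' ids, and signal
// truncation (the real API returns S_FALSE) when the buffer was too small.
bool CopyRejitIds(const std::vector<ReJITID>& all, std::uint32_t capacity,
                  std::uint32_t* pTotal, ReJITID* out)
{
    std::uint32_t cnt = 0;
    for (ReJITID id : all)
    {
        if (cnt < capacity)
            out[cnt] = id;
        ++cnt;
    }
    *pTotal = cnt;
    return cnt <= capacity; // false means the ids were truncated
}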
-//---------------------------------------------------------------------------------------
-//
-// Helper that inits a new ReJitReportErrorWorkItem and adds it to the pErrors array
-//
-// Arguments:
-// * pModule - The module in the module/MethodDef identifier pair for the method which
-// had an error during rejit
-// * methodDef - The MethodDef in the module/MethodDef identifier pair for the method which
-// had an error during rejit
-// * pMD - If available, the specific method instance which had an error during rejit
-// * hrStatus - HRESULT for the rejit error that occurred
-// * pErrors - the list of error records that this method will append to
-//
-// Return Value:
-// * S_OK: error was appended
-// * E_OUTOFMEMORY: Not enough memory to create the new error item. The array is unchanged.
-//
-
-//static
-HRESULT ReJitManager::AddReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- ReJitReportErrorWorkItem* pError = pErrors->Append();
- if (pError == NULL)
- {
- return E_OUTOFMEMORY;
- }
- pError->pModule = pModule;
- pError->methodDef = methodDef;
- pError->pMethodDesc = pMD;
- pError->hrStatus = hrStatus;
- return S_OK;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// Helper that inits a new ReJitReportErrorWorkItem and adds it to the pErrors array
-//
-// Arguments:
-// * pReJitInfo - The method which had an error during rejit
-// * hrStatus - HRESULT for the rejit error that occurred
-// * pErrors - the list of error records that this method will append to
-//
-// Return Value:
-// * S_OK: error was appended
-// * E_OUTOFMEMORY: Not enough memory to create the new error item. The array is unchanged.
-//
-
-//static
-HRESULT ReJitManager::AddReJITError(ReJitInfo* pReJitInfo, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- Module * pModule = NULL;
- mdMethodDef methodDef = mdTokenNil;
- pReJitInfo->GetModuleAndTokenRegardlessOfKeyType(&pModule, &methodDef);
- return AddReJITError(pModule, methodDef, pReJitInfo->GetMethodDesc(), hrStatus, pErrors);
-}
-
-#ifdef _DEBUG
-//---------------------------------------------------------------------------------------
-//
-// Debug-only helper used while iterating through the hash table of
-// ReJitInfos to verify that all entries between the specified iterators are
-// reverted. Asserts if it finds any non-reverted entries.
-//
-// Arguments:
-// * iter - Iterator to start verifying at
-// * end - Iterator to stop verifying at
-//
-//
-
-void ReJitManager::AssertRestOfEntriesAreReverted(
- ReJitInfoHash::KeyIterator iter,
- ReJitInfoHash::KeyIterator end)
-{
- LIMITED_METHOD_CONTRACT;
-
- // All other rejits should be in the reverted state
- while (++iter != end)
- {
- _ASSERTE((*iter)->m_pShared->GetState() == SharedReJitInfo::kStateReverted);
- }
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Debug-only helper to dump ReJitManager contents to stdout. Only used if
-// COMPlus_ProfAPI_EnableRejitDiagnostics is set.
-//
-// Arguments:
-// * szIntroText - Intro text passed by caller to be output before this ReJitManager
-// is dumped.
-//
-//
-
-void ReJitManager::Dump(LPCSTR szIntroText)
-{
- if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ProfAPI_EnableRejitDiagnostics) == 0)
- return;
-
- printf(szIntroText);
- fflush(stdout);
-
- CrstHolder ch(&m_crstTable);
-
- printf("BEGIN ReJitManager::Dump: 0x%p\n", this);
-
- for (ReJitInfoHash::Iterator iterCur = m_table.Begin(), iterEnd = m_table.End();
- iterCur != iterEnd;
- iterCur++)
- {
- ReJitInfo * pInfo = *iterCur;
- printf(
- "\tInfo 0x%p: State=0x%x, Next=0x%p, Shared=%p, SharedState=0x%x\n",
- pInfo,
- pInfo->GetState(),
- (void*)pInfo->m_pNext,
- (void*)pInfo->m_pShared,
- pInfo->m_pShared->GetState());
-
- switch(pInfo->m_key.m_keyType)
- {
- case ReJitInfo::Key::kMethodDesc:
- printf(
- "\t\tMD=0x%p, %s.%s (%s)\n",
- (void*)pInfo->GetMethodDesc(),
- pInfo->GetMethodDesc()->m_pszDebugClassName,
- pInfo->GetMethodDesc()->m_pszDebugMethodName,
- pInfo->GetMethodDesc()->m_pszDebugMethodSignature);
- break;
-
- case ReJitInfo::Key::kMetadataToken:
- Module * pModule;
- mdMethodDef methodDef;
- pInfo->GetModuleAndToken(&pModule, &methodDef);
- printf(
- "\t\tModule=0x%p, Token=0x%x\n",
- pModule,
- methodDef);
- break;
-
- case ReJitInfo::Key::kUninitialized:
- printf("\t\tUNINITIALIZED\n");
- break;
-
- default:
- _ASSERTE(!"Unrecognized pInfo key type");
- }
- fflush(stdout);
- }
- printf("END ReJitManager::Dump: 0x%p\n", this);
- fflush(stdout);
-}
-
-#endif // _DEBUG
-
-//---------------------------------------------------------------------------------------
-// ReJitInfo implementation
-
-// All the state-changey stuff is kept up here in the !DACCESS_COMPILE block.
-// The more read-only inspection-y stuff follows the block.
-
-
-#ifndef DACCESS_COMPILE
-
-//---------------------------------------------------------------------------------------
-//
-// Do the actual work of stamping the top of originally-jitted-code with a jmp that goes
-// to the prestub. This can be called in one of three ways:
-// * Case 1: By RequestReJIT against an already-jitted function, in which case the
-// PCODE may be inferred by the MethodDesc, and our caller will have suspended
-// the EE for us, OR
-// * Case 2: By the prestub worker after jitting the original code of a function
-// (i.e., the "pre-rejit" scenario). In this case, the EE is not suspended. But
-// that's ok, because the PCODE has not yet been published to the MethodDesc, and
-// no thread can be executing inside the originally JITted function yet.
-// * Case 3: At type/method restore time for an NGEN'ed assembly. This is also the pre-rejit
-// scenario because we are guaranteed to do this before the code in the module
-// is executable. EE suspend is not required.
-//
-// Arguments:
-// * pCode - Case 1 (above): will be NULL, and we can infer the PCODE from the
-// MethodDesc; Case 2+3 (above, pre-rejit): will be non-NULL, and we'll need to use
-// this to find the code to stamp on top of.
-//
-// Return Value:
-// * S_OK: Either we successfully did the jmp-stamp, or a racing thread took care of
-// it for us.
-// * Else, HRESULT indicating failure.
-//
-// Assumptions:
-// The caller will have suspended the EE if necessary (case 1), before this is
-// called.
-//
-HRESULT ReJitInfo::JumpStampNativeCode(PCODE pCode /* = NULL */)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
-
- // It may seem dangerous to be stamping jumps over code while a GC is going on,
- // but we're actually safe. As we assert below, either we're holding the thread
- // store lock (and thus preventing a GC) OR we're stamping code that has not yet
-// been published (and will thus not be executed by managed threads or examined
- // by the GC).
- MODE_ANY;
- }
- CONTRACTL_END;
-
- PCODE pCodePublished = GetMethodDesc()->GetNativeCode();
-
- _ASSERTE((pCode != NULL) || (pCodePublished != NULL));
- _ASSERTE(GetMethodDesc()->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
-
- HRESULT hr = S_OK;
-
- // We'll jump-stamp over pCode, or if pCode is NULL, jump-stamp over the published
- // code for this's MethodDesc.
- LPBYTE pbCode = (LPBYTE) pCode;
- if (pbCode == NULL)
- {
- // If caller didn't specify a pCode, just use the one that was published after
- // the original JIT. (A specific pCode would be passed in the pre-rejit case,
- // to jump-stamp the original code BEFORE the PCODE gets published.)
- pbCode = (LPBYTE) pCodePublished;
- }
- _ASSERTE (pbCode != NULL);
-
- // The debugging API may also try to write to the very top of this function (though
- // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
- // whether we can safely patch the actual code, or instead write to the debugger's
- // buffer.
- DebuggerController::ControllerLockHolder lockController;
-
- // We could be in a race. Either two threads simultaneously JITting the same
- // method for the first time or two threads restoring NGEN'ed code.
- // Another thread may (or may not) have jump-stamped its copy of the code already
- _ASSERTE((GetState() == kJumpNone) || (GetState() == kJumpToPrestub));
-
- if (GetState() == kJumpToPrestub)
- {
- // The method has already been jump stamped so nothing left to do
- _ASSERTE(CodeIsSaved());
- return S_OK;
- }
-
- // Remember what we're stamping our jump on top of, so we can replace it during a
- // revert.
- for (int i = 0; i < sizeof(m_rgSavedCode); i++)
- {
- m_rgSavedCode[i] = *FirstCodeByteAddr(pbCode+i, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)(pbCode+i)));
- }
-
- EX_TRY
- {
- AllocMemTracker amt;
-
- // This guy might throw on out-of-memory, so rely on the tracker to clean-up
- Precode * pPrecode = Precode::Allocate(PRECODE_STUB, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator(), &amt);
- PCODE target = pPrecode->GetEntryPoint();
-
-#if defined(_X86_) || defined(_AMD64_)
-
- // Normal unpatched code never starts with a jump
- // so make sure this code isn't already patched
- _ASSERTE(*FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) != X86_INSTR_JMP_REL32);
-
- INT64 i64OldCode = *(INT64*)pbCode;
- INT64 i64NewCode = i64OldCode;
- LPBYTE pbNewValue = (LPBYTE)&i64NewCode;
- *pbNewValue = X86_INSTR_JMP_REL32;
- INT32 UNALIGNED * pOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
- // This will throw for out-of-memory, so don't write anything until
- // after he succeeds
- // This guy will leak/cache/reuse the jumpstub
- *pOffset = rel32UsingJumpStub(reinterpret_cast<INT32 UNALIGNED *>(pbCode + 1), target, GetMethodDesc(), GetMethodDesc()->GetLoaderAllocator());
-
- // If we have the EE suspended or the code is unpublished there won't be contention on this code
- hr = UpdateJumpStampHelper(pbCode, i64OldCode, i64NewCode, FALSE);
- if (FAILED(hr))
- {
- ThrowHR(hr);
- }
-
- //
- // No failure point after this!
- //
- amt.SuppressRelease();
-
-#else // _X86_ || _AMD64_
-#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
-
-#endif // _X86_ || _AMD64_
-
- m_dwInternalFlags &= ~kStateMask;
- m_dwInternalFlags |= kJumpToPrestub;
- }
- EX_CATCH_HRESULT(hr);
- _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
-
- if (SUCCEEDED(hr))
- {
- _ASSERTE(GetState() == kJumpToPrestub);
- _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0
- }
-
- return hr;
-}
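JumpStampNativeCode assembles the replacement bytes into a single INT64 before writing: opcode in byte 0, rel32 displacement in bytes 1-4, original bytes preserved in 5-7. A standalone sketch of that composition, assuming the target is within rel32 range (the removed code routes through a jump stub via rel32UsingJumpStub when it is not):

#include <cstdint>
#include <cstring>

// Compose the 8-byte word the jump stamp swaps into the prolog: byte 0 is the
// x86 rel32 JMP opcode (0xE9), bytes 1-4 the displacement measured from the
// end of the 5-byte instruction, and bytes 5-7 are left as they were.
std::uint64_t ComposeJumpStamp(std::uint64_t oldBytes,
                               std::uintptr_t codeStart,
                               std::uintptr_t target)
{
    std::uint64_t newBytes = oldBytes;
    auto* p = reinterpret_cast<std::uint8_t*>(&newBytes);
    p[0] = 0xE9; // X86_INSTR_JMP_REL32
    const std::int32_t rel32 =
        static_cast<std::int32_t>(target - (codeStart + 5));
    std::memcpy(p + 1, &rel32, sizeof(rel32));
    return newBytes;
}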
-
-//---------------------------------------------------------------------------------------
-//
-// Poke the JITted code to satisfy a revert request (or to perform an implicit revert as
-// part of a second, third, etc. rejit request). Reinstates the originally JITted code
-// that had been jump-stamped over to perform a prior rejit.
-//
-// Arguments
-// fEESuspended - TRUE if the caller keeps the EE suspended during this call
-//
-//
-// Return Value:
-// S_OK to indicate the revert succeeded,
-// CORPROF_E_RUNTIME_SUSPEND_REQUIRED to indicate the jumpstamp hasn't been reverted
-// and EE suspension will be needed for success
-// other failure HRESULT indicating what went wrong.
-//
-// Assumptions:
-// Caller must be holding the owning ReJitManager's table crst.
-//
-
-HRESULT ReJitInfo::UndoJumpStampNativeCode(BOOL fEESuspended)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- _ASSERTE(GetMethodDesc()->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
- _ASSERTE((m_pShared->GetState() == SharedReJitInfo::kStateReverted));
- _ASSERTE((GetState() == kJumpToPrestub) || (GetState() == kJumpToRejittedCode));
- _ASSERTE(m_rgSavedCode[0] != 0); // saved code should not start with 0 (see above test)
-
- BYTE * pbCode = (BYTE*)GetMethodDesc()->GetNativeCode();
- DebuggerController::ControllerLockHolder lockController;
-
-#if defined(_X86_) || defined(_AMD64_)
- _ASSERTE(m_rgSavedCode[0] != X86_INSTR_JMP_REL32);
- _ASSERTE(*FirstCodeByteAddr(pbCode, DebuggerController::GetPatchTable()->GetPatch((CORDB_ADDRESS_TYPE *)pbCode)) == X86_INSTR_JMP_REL32);
-#else
-#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
-#endif // _X86_ || _AMD64_
-
- // For the interlocked compare, remember what pbCode is right now
- INT64 i64OldValue = *(INT64 *)pbCode;
- // Assemble the INT64 of the new code bytes to write. Start with what's there now
- INT64 i64NewValue = i64OldValue;
- memcpy(LPBYTE(&i64NewValue), m_rgSavedCode, sizeof(m_rgSavedCode));
- HRESULT hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
- _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
- if (hr != S_OK)
- return hr;
-
- // Transition state of this ReJitInfo to indicate the MD no longer has any jump stamp
- m_dwInternalFlags &= ~kStateMask;
- m_dwInternalFlags |= kJumpNone;
- return S_OK;
-}
-
-//---------------------------------------------------------------------------------------
-//
-// After code has been rejitted, this is called to update the jump-stamp to go from
-// pointing to the prestub, to pointing to the newly rejitted code.
-//
-// Arguments:
-// fEESuspended - TRUE if the caller keeps the EE suspended during this call
-// pRejittedCode - jitted code for the updated IL this method should execute
-//
-// Assumptions:
-// This rejit manager's table crst should be held by the caller
-//
-// Returns - S_OK if the jump target is updated
-// CORPROF_E_RUNTIME_SUSPEND_REQUIRED if the ee isn't suspended and it
-// will need to be in order to do the update safely
-HRESULT ReJitInfo::UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_PREEMPTIVE;
- }
- CONTRACTL_END;
-
- MethodDesc * pMD = GetMethodDesc();
- _ASSERTE(pMD->GetReJitManager()->IsTableCrstOwnedByCurrentThread());
- _ASSERTE(m_pShared->GetState() == SharedReJitInfo::kStateActive);
- _ASSERTE(GetState() == kJumpToPrestub);
- _ASSERTE(m_pCode == NULL);
-
- // Beginning of originally JITted code containing the jmp that we will redirect.
- BYTE * pbCode = (BYTE*)pMD->GetNativeCode();
-
-#if defined(_X86_) || defined(_AMD64_)
-
- HRESULT hr = S_OK;
- {
- DebuggerController::ControllerLockHolder lockController;
-
- // This will throw for out-of-memory, so don't write anything until
- // after he succeeds
- // This guy will leak/cache/reuse the jumpstub
- INT32 offset = 0;
- EX_TRY
- {
- offset = rel32UsingJumpStub(
- reinterpret_cast<INT32 UNALIGNED *>(&pbCode[1]), // base of offset
- pRejittedCode, // target of jump
- pMD,
- pMD->GetLoaderAllocator());
- }
- EX_CATCH_HRESULT(hr);
- _ASSERT(hr == S_OK || hr == E_OUTOFMEMORY);
- if (FAILED(hr))
- {
- return hr;
- }
- // For validation later, remember what pbCode is right now
- INT64 i64OldValue = *(INT64 *)pbCode;
-
- // Assemble the INT64 of the new code bytes to write. Start with what's there now
- INT64 i64NewValue = i64OldValue;
- LPBYTE pbNewValue = (LPBYTE)&i64NewValue;
-
- // First byte becomes a rel32 jmp instruction (should be a no-op as asserted
- // above, but can't hurt)
- *pbNewValue = X86_INSTR_JMP_REL32;
- // Next 4 bytes are the jmp target (offset to jmp stub)
- INT32 UNALIGNED * pnOffset = reinterpret_cast<INT32 UNALIGNED *>(&pbNewValue[1]);
- *pnOffset = offset;
-
- hr = UpdateJumpStampHelper(pbCode, i64OldValue, i64NewValue, !fEESuspended);
- _ASSERTE(hr == S_OK || (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED && !fEESuspended));
- }
- if (FAILED(hr))
- {
- return hr;
- }
-
-#else // _X86_ || _AMD64_
-#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
-#endif // _X86_ || _AMD64_
-
- // State transition
- m_dwInternalFlags &= ~kStateMask;
- m_dwInternalFlags |= kJumpToRejittedCode;
- return S_OK;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// This is called to modify the jump-stamp area, the first ReJitInfo::JumpStubSize bytes
-// in the method's code.
-//
-// Notes:
-// Callers use this method in a variety of circumstances:
-// a) when the code is unpublished (fContentionPossible == FALSE)
-// b) when the caller has taken the ThreadStoreLock and suspended the EE
-// (fContentionPossible == FALSE)
-// c) when the code is published, the EE isn't suspended, and the jumpstamp
-// area consists of a single 5 byte long jump instruction
-// (fContentionPossible == TRUE)
-// This method will attempt to alter the jump-stamp even if the caller has not prevented
-// contention, but there is no guarantee it will be successful. When the caller has prevented
-// contention, then success is assured. Callers may opportunistically try without
-// EE suspension, and then upgrade to EE suspension if the first attempt fails.
-//
-// Assumptions:
-// This rejit manager's table crst should be held by the caller or fContentionPossible==FALSE
-// The debugger patch table lock should be held by the caller
-//
-// Arguments:
-// pbCode - pointer to the code where the jump stamp is placed
-// i64OldValue - the bytes which should currently be at the start of the method code
-// i64NewValue - the new bytes which should be written at the start of the method code
-// fContentionPossible - See the Notes section above.
-//
-// Returns:
-// S_OK => the jumpstamp has been successfully updated.
-// CORPROF_E_RUNTIME_SUSPEND_REQUIRED => the jumpstamp remains unchanged (preventing contention will be necessary)
-// other failing HR => VirtualProtect failed, the jumpstamp remains unchanged
-//
-HRESULT ReJitInfo::UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64NewValue, BOOL fContentionPossible)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- MethodDesc * pMD = GetMethodDesc();
- _ASSERTE(pMD->GetReJitManager()->IsTableCrstOwnedByCurrentThread() || !fContentionPossible);
-
- // When ReJIT is enabled, method entrypoints are always at least 8-byte aligned (see
- // code:EEJitManager::allocCode), so we can do a single 64-bit interlocked operation
- // to update the jump target. However, some code may have gotten compiled before
- // the profiler had a chance to enable ReJIT (e.g., NGENd code, or code JITted
- // before a profiler attaches). In such cases, we cannot rely on a simple
- // interlocked operation, and instead must suspend the runtime to ensure we can
- // safely update the jmp instruction.
- //
- // This method doesn't verify that the method is actually safe to rejit, we expect
- // callers to do that. At the moment NGEN'ed code is safe to rejit even if
- // it is unaligned, but code generated before the profiler attaches is not.
- if (fContentionPossible && !(IS_ALIGNED(pbCode, sizeof(INT64))))
- {
- return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
- }
-
- // The debugging API may also try to write to this function (though
- // with an int 3 for breakpoint purposes). Coordinate with the debugger so we know
- // whether we can safely patch the actual code, or instead write to the debugger's
- // buffer.
- if (fContentionPossible)
- {
- for (CORDB_ADDRESS_TYPE* pbProbeAddr = pbCode; pbProbeAddr < pbCode + ReJitInfo::JumpStubSize; pbProbeAddr++)
- {
- if (NULL != DebuggerController::GetPatchTable()->GetPatch(pbProbeAddr))
- {
- return CORPROF_E_RUNTIME_SUSPEND_REQUIRED;
- }
- }
- }
-
-#if defined(_X86_) || defined(_AMD64_)
-
- DWORD oldProt;
- if (!ClrVirtualProtect((LPVOID)pbCode, 8, PAGE_EXECUTE_READWRITE, &oldProt))
- {
- return HRESULT_FROM_WIN32(GetLastError());
- }
-
- if (fContentionPossible)
- {
- INT64 i64InterlockReportedOldValue = FastInterlockCompareExchangeLong((INT64 *)pbCode, i64NewValue, i64OldValue);
- // Since changes to these bytes are protected by this rejitmgr's m_crstTable, we
- // shouldn't have two writers conflicting.
- _ASSERTE(i64InterlockReportedOldValue == i64OldValue);
- }
- else
- {
- // In this path the caller ensures:
- // a) no thread will execute through the prologue area we are modifying
- // b) no thread is stopped in a prologue such that it resumes in the middle of code we are modifying
- // c) no thread is doing a debugger patch skip operation in which an unmodified copy of the method's
- // code could be executed from a patch skip buffer.
-
- // PERF: we might still want a faster path through here if we aren't debugging that doesn't do
- // all the patch checks
- for (int i = 0; i < ReJitInfo::JumpStubSize; i++)
- {
- *FirstCodeByteAddr(pbCode+i, DebuggerController::GetPatchTable()->GetPatch(pbCode+i)) = ((BYTE*)&i64NewValue)[i];
- }
- }
-
- if (oldProt != PAGE_EXECUTE_READWRITE)
- {
- // The CLR codebase in many locations simply ignores failures to restore the page protections
-// It's true that it isn't a problem functionally, but it seems a bit sketchy?
- // I am following the convention for now.
- ClrVirtualProtect((LPVOID)pbCode, 8, oldProt, &oldProt);
- }
-
- FlushInstructionCache(GetCurrentProcess(), pbCode, ReJitInfo::JumpStubSize);
- return S_OK;
-
-#else // _X86_ || _AMD64_
-#error "Need to define a way to jump-stamp the prolog in a safe way for this platform"
-#endif // _X86_ || _AMD64_
-}
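The contended path of UpdateJumpStampHelper comes down to one aligned 64-bit compare-exchange over the first 8 code bytes. A minimal sketch using std::atomic in place of FastInterlockCompareExchangeLong; the function name and the bool return are illustrative:

#include <atomic>
#include <cstdint>

// A single aligned 64-bit compare-exchange replaces the first 8 code bytes,
// which is only safe because rejit-enabled entrypoints are 8-byte aligned.
// When the word is unaligned (or a debugger patch overlaps it), the caller
// must instead fall back to suspending the runtime.
bool TryPatchCodeWord(std::atomic<std::int64_t>* pCodeWord,
                      std::int64_t expectedOld, std::int64_t desiredNew)
{
    if (reinterpret_cast<std::uintptr_t>(pCodeWord) % alignof(std::int64_t) != 0)
        return false; // unaligned: report that suspension is required
    return pCodeWord->compare_exchange_strong(expectedOld, desiredNew);
}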
-
-
-#endif // DACCESS_COMPILE
-// The rest of the ReJitInfo methods are safe to compile for DAC
-
-
-
-//---------------------------------------------------------------------------------------
-//
-// ReJitInfos can be constructed in two ways: As a "regular" ReJitInfo indexed by
-// MethodDesc *, or as a "placeholder" ReJitInfo (to satisfy pre-rejit requests) indexed
-// by (Module *, methodDef). Both constructors call this helper to do all the common
-// code for initializing the ReJitInfo.
-//
-
-void ReJitInfo::CommonInit()
-{
- LIMITED_METHOD_CONTRACT;
-
- m_pCode = NULL;
- m_pNext = NULL;
- m_dwInternalFlags = kJumpNone;
- m_pShared->AddMethod(this);
- ZeroMemory(m_rgSavedCode, sizeof(m_rgSavedCode));
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Regardless of which kind of ReJitInfo this is, this will always return its
-// corresponding Module * & methodDef
-//
-// Arguments:
-// * ppModule - [out] Module * related to this ReJitInfo (which contains the
-// returned methodDef)
-// * pMethodDef - [out] methodDef related to this ReJitInfo
-//
-
-void ReJitInfo::GetModuleAndTokenRegardlessOfKeyType(Module ** ppModule, mdMethodDef * pMethodDef)
-{
- CONTRACTL
- {
- NOTHROW;
- GC_NOTRIGGER;
- MODE_ANY;
- SO_NOT_MAINLINE;
- }
- CONTRACTL_END;
-
- _ASSERTE(ppModule != NULL);
- _ASSERTE(pMethodDef != NULL);
-
- if (m_key.m_keyType == Key::kMetadataToken)
- {
- GetModuleAndToken(ppModule, pMethodDef);
- }
- else
- {
- MethodDesc * pMD = GetMethodDesc();
- _ASSERTE(pMD != NULL);
- _ASSERTE(pMD->IsRestored());
-
- *ppModule = pMD->GetModule();
- *pMethodDef = pMD->GetMemberDef();
- }
-
- _ASSERTE(*ppModule != NULL);
- _ASSERTE(*pMethodDef != mdTokenNil);
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Used as part of the hash table implementation in the containing ReJitManager, this
-// hashes a ReJitInfo by MethodDesc * when available, else by (Module *, methodDef)
-//
-// Arguments:
-// key - Key representing the ReJitInfo to hash
-//
-// Return Value:
-// Hash value of the ReJitInfo represented by the specified key
-//
-
-// static
-COUNT_T ReJitInfo::Hash(Key key)
-{
- LIMITED_METHOD_CONTRACT;
-
- if (key.m_keyType == Key::kMethodDesc)
- {
- return HashPtr(0, PTR_MethodDesc(key.m_pMD));
- }
-
- _ASSERTE (key.m_keyType == Key::kMetadataToken);
-
- return HashPtr(key.m_methodDef, PTR_Module(key.m_pModule));
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Return the IL to compile for a given ReJitInfo
-//
-// Return Value:
-// Pointer to IL buffer to compile. If the profiler has specified IL to rejit,
-// this will be our copy of the IL buffer specified by the profiler. Else, this
-// points to the original IL for the method from its module's metadata.
-//
-// Notes:
-// IL memory is managed by us, not the caller. Caller must not free the buffer.
-//
-
-COR_ILMETHOD * ReJitInfo::GetIL()
-{
- CONTRACTL
- {
- THROWS; // Getting original IL via PEFile::GetIL can throw
- CAN_TAKE_LOCK; // Looking up dynamically overridden IL takes a lock
- GC_NOTRIGGER;
- MODE_ANY;
- }
- CONTRACTL_END;
-
- if (m_pShared->m_pbIL != NULL)
- {
- return reinterpret_cast<COR_ILMETHOD *>(m_pShared->m_pbIL);
- }
-
- // If the user hasn't overridden us, get whatever the original IL had
- return GetMethodDesc()->GetILHeader(TRUE);
-}
-
-
-//---------------------------------------------------------------------------------------
-// SharedReJitInfo implementation
-
-
-SharedReJitInfo::SharedReJitInfo()
- : m_dwInternalFlags(kStateRequested),
- m_pbIL(NULL),
- m_dwCodegenFlags(0),
- m_reJitId(InterlockedIncrement(reinterpret_cast<LONG*>(&s_GlobalReJitId))),
- m_pInfoList(NULL)
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Link in the specified ReJitInfo to the list maintained by this SharedReJitInfo
-//
-// Arguments:
-// pInfo - ReJitInfo being added
-//
-
-void SharedReJitInfo::AddMethod(ReJitInfo * pInfo)
-{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(pInfo->m_pShared == this);
-
- // Push it on the head of our list
- _ASSERTE(pInfo->m_pNext == NULL);
- pInfo->m_pNext = PTR_ReJitInfo(m_pInfoList);
- m_pInfoList = pInfo;
-}
-
-
-//---------------------------------------------------------------------------------------
-//
-// Unlink the specified ReJitInfo from the list maintained by this SharedReJitInfo.
-// Currently this is only used on AD unload to remove ReJitInfos of non-domain-neutral instantiations
-// of domain-neutral generics (which are tracked in the SharedDomain's ReJitManager).
-// This may be used in the future once we implement memory reclamation on revert().
-//
-// Arguments:
-// pInfo - ReJitInfo being removed
-//
-
-void SharedReJitInfo::RemoveMethod(ReJitInfo * pInfo)
-{
- LIMITED_METHOD_CONTRACT;
-
-#ifndef DACCESS_COMPILE
-
- // Find it
- ReJitInfo ** ppEntry = &m_pInfoList;
- while (*ppEntry != pInfo)
- {
- ppEntry = &(*ppEntry)->m_pNext;
- _ASSERTE(*ppEntry != NULL);
- }
-
- // Remove it
- _ASSERTE((*ppEntry)->m_pShared == this);
- *ppEntry = (*ppEntry)->m_pNext;
-
-#endif // DACCESS_COMPILE
-}
-
-//---------------------------------------------------------------------------------------
-//
-// MethodDesc::MakeJitWorker() calls this to determine if there's an outstanding
-// "pre-rejit" request for a MethodDesc that has just been jitted for the first time.
-// This is also called when methods are being restored in NGEN images. The sequence looks like:
-// *Enter holder
-// Enter Rejit table lock
-// DoJumpStampIfNecessary
-// *Runtime code publishes/restores method
-// *Exit holder
-// Leave rejit table lock
-// Send rejit error callbacks if needed
-//
-// This also has a non-locking early-out if ReJIT is not enabled.
-//
-// #PublishCode:
-// Note that the runtime needs to publish/restore the PCODE while this holder is
-// on the stack, so that the publish happens under the ReJitManager's lock.
-// This prevents a "lost pre-rejit" race with a profiler that calls
-// RequestReJIT just as the method finishes compiling. In particular, the locking ensures
-// atomicity between this set of steps (performed in DoJumpStampIfNecessary):
-// * (1) Checking whether there is a pre-rejit request for this MD
-// * (2) If not, skip doing the pre-rejit-jmp-stamp
-// * (3) Publishing the PCODE
-//
-// with respect to these steps performed in RequestReJIT:
-// * (a) Is PCODE published yet?
-// * (b) If not, create pre-rejit (placeholder) ReJitInfo which the prestub will
-// consult when it JITs the original IL
-//
-// Without this atomicity, we could get the ordering (1), (2), (a), (b), (3), resulting
-// in the rejit request getting completely ignored (i.e., we file away the pre-rejit
-// placeholder AFTER the prestub checks for it).
-//
-// A similar race is possible for code being restored. In that case the restoring thread
-// does:
-// * (1) Check if there is a pre-rejit request for this MD
-// * (2) If not, no need to jmp-stamp
-// * (3) Restore the MD
-
-// And RequestRejit does:
-// * (a) [In LoadedMethodDescIterator] Is a potential MD restored yet?
-// * (b) [In MarkInstantiationsForReJit] If not, don't queue it for jump-stamping
-//
- // Same ordering (1), (2), (a), (b), (3) results in missing both opportunities to
- // jump-stamp.
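
To make the ordering concrete, a hedged sketch of a publish site (the holder
is the one defined just below; the call site and the publish step are
stand-ins, not the actual runtime code):

    // Hypothetical publish site illustrating the holder pattern.
    PCODE PublishNewlyJittedCode(MethodDesc* pMD, PCODE pCode)
    {
        // Ctor enters the rejit table lock and performs steps (1)/(2):
        // jump-stamp now if a pre-rejit request is already on file.
        ReJitPublishMethodHolder publishWorker(pMD, pCode);

        // ... step (3): the runtime publishes/restores the PCODE here,
        // while the lock is still held, so RequestReJIT's steps (a)/(b)
        // cannot interleave between (2) and (3) ...

        return pCode;
        // Dtor leaves the lock, then sends any rejit error callbacks.
    }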
-
-#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
-ReJitPublishMethodHolder::ReJitPublishMethodHolder(MethodDesc* pMethodDesc, PCODE pCode) :
-m_pMD(NULL), m_hr(S_OK)
-{
- // This method can't have a contract because entering the table lock
- // below increments GCNoTrigger count. Contracts always revert these changes
- // at the end of the method but we need the incremented count to flow out of the
- // method. The balancing decrement occurs in the destructor.
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_CAN_TAKE_LOCK;
- STATIC_CONTRACT_MODE_ANY;
-
- // We come here from the PreStub and from MethodDesc::CheckRestore
- // The method should be effectively restored, but we haven't yet
- // cleared the unrestored bit so we can't assert pMethodDesc->IsRestored()
- // We can assert:
- _ASSERTE(pMethodDesc->GetMethodTable()->IsRestored());
-
- if (ReJitManager::IsReJITEnabled() && (pCode != NULL))
- {
- m_pMD = pMethodDesc;
- ReJitManager* pReJitManager = pMethodDesc->GetReJitManager();
- pReJitManager->m_crstTable.Enter();
- m_hr = pReJitManager->DoJumpStampIfNecessary(pMethodDesc, pCode);
- }
-}
-
-
-ReJitPublishMethodHolder::~ReJitPublishMethodHolder()
-{
- // This method can't have a contract because leaving the table lock
- // below decrements GCNoTrigger count. Contracts always revert these changes
- // at the end of the method but we need the decremented count to flow out of the
- // method. The balancing increment occurred in the constructor.
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
- STATIC_CONTRACT_CAN_TAKE_LOCK;
- STATIC_CONTRACT_MODE_ANY;
-
- if (m_pMD)
- {
- ReJitManager* pReJitManager = m_pMD->GetReJitManager();
- pReJitManager->m_crstTable.Leave();
- if (FAILED(m_hr))
- {
- ReJitManager::ReportReJITError(m_pMD->GetModule(), m_pMD->GetMemberDef(), m_pMD, m_hr);
- }
- }
-}
-
-ReJitPublishMethodTableHolder::ReJitPublishMethodTableHolder(MethodTable* pMethodTable) :
-m_pMethodTable(NULL)
-{
- // This method can't have a contract because entering the table lock
- // below increments GCNoTrigger count. Contracts always revert these changes
- // at the end of the method but we need the incremented count to flow out of the
- // method. The balancing decrement occurs in the destructor.
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_NOTRIGGER;
- STATIC_CONTRACT_CAN_TAKE_LOCK;
- STATIC_CONTRACT_MODE_ANY;
-
- // We come here from MethodTable::SetIsRestored
- // The method table should be effectively restored, but we haven't yet
- // cleared the unrestored bit so we can't assert pMethodTable->IsRestored()
-
- if (ReJitManager::IsReJITEnabled())
- {
- m_pMethodTable = pMethodTable;
- ReJitManager* pReJitManager = pMethodTable->GetModule()->GetReJitManager();
- pReJitManager->m_crstTable.Enter();
- MethodTable::IntroducedMethodIterator itMethods(pMethodTable, FALSE);
- for (; itMethods.IsValid(); itMethods.Next())
- {
- // Although the MethodTable is restored, the methods might not be.
- // We need to be careful to only query portions of the MethodDesc
- // that work in a partially restored state. The only methods that need
- // further restoration are IL stubs (which aren't rejittable) and
- // generic methods. The only generic methods directly accessible from
- // the MethodTable are definitions. GetNativeCode() on generic defs
- // will run successfully and return NULL, which short-circuits the
- // rest of the logic.
- MethodDesc * pMD = itMethods.GetMethodDesc();
- PCODE pCode = pMD->GetNativeCode();
- if (pCode != NULL)
- {
- HRESULT hr = pReJitManager->DoJumpStampIfNecessary(pMD, pCode);
- if (FAILED(hr))
- {
- ReJitManager::AddReJITError(pMD->GetModule(), pMD->GetMemberDef(), pMD, hr, &m_errors);
- }
- }
- }
- }
-}
-
-
-ReJitPublishMethodTableHolder::~ReJitPublishMethodTableHolder()
-{
- // This method can't have a contract because leaving the table lock
- // below decrements GCNoTrigger count. Contracts always revert these changes
- // at the end of the method but we need the decremented count to flow out of the
- // method. The balancing increment occurred in the constructor.
- STATIC_CONTRACT_NOTHROW;
- STATIC_CONTRACT_GC_TRIGGERS; // NOTRIGGER until we leave the lock
- STATIC_CONTRACT_CAN_TAKE_LOCK;
- STATIC_CONTRACT_MODE_ANY;
-
- if (m_pMethodTable)
- {
- ReJitManager* pReJitManager = m_pMethodTable->GetModule()->GetReJitManager();
- pReJitManager->m_crstTable.Leave();
- for (int i = 0; i < m_errors.Count(); i++)
- {
- ReJitManager::ReportReJITError(&(m_errors[i]));
- }
- }
-}
-#endif // !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
-
+#endif // FEATURE_CODE_VERSIONING
#else // FEATURE_REJIT
// On architectures that don't support rejit, just keep around some do-nothing
@@ -3972,19 +1060,6 @@ HRESULT ReJitManager::RequestRevert(
return E_NOTIMPL;
}
-// static
-void ReJitManager::OnAppDomainExit(AppDomain * pAppDomain)
-{
-}
-
-ReJitManager::ReJitManager()
-{
-}
-
-void ReJitManager::PreInit(BOOL fSharedDomain)
-{
-}
-
ReJITID ReJitManager::GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart)
{
return 0;
@@ -3995,11 +1070,6 @@ ReJITID ReJitManager::GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart)
return 0;
}
-PCODE ReJitManager::GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId)
-{
- return NULL;
-}
-
HRESULT ReJitManager::GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[])
{
return E_NOTIMPL;
diff --git a/src/vm/rejit.h b/src/vm/rejit.h
index 3c8bfd66b2..8401ecb960 100644
--- a/src/vm/rejit.h
+++ b/src/vm/rejit.h
@@ -19,9 +19,8 @@
#include "contractimpl.h"
#include "shash.h"
#include "corprof.h"
+#include "codeversion.h"
-struct ReJitInfo;
-struct SharedReJitInfo;
class ReJitManager;
class MethodDesc;
class ClrDataAccess;
@@ -68,347 +67,9 @@ protected:
COR_IL_MAP * m_rgInstrumentedMapEntries;
};
-//---------------------------------------------------------------------------------------
-// Helper base class used by the structures below to enforce that their
-// pieces get allocated on the appropriate loader heaps
-//
-struct LoaderHeapAllocatedRejitStructure
-{
-public:
- void * operator new (size_t size, LoaderHeap * pHeap, const NoThrow&);
- void * operator new (size_t size, LoaderHeap * pHeap);
-};
-
-//---------------------------------------------------------------------------------------
-// One instance of this per rejit request for each mdMethodDef. Contains IL and
-// compilation flags. This is used primarily as a structure, so most of its
-// members are left public.
-//
-struct SharedReJitInfo : public LoaderHeapAllocatedRejitStructure
-{
-private:
- // This determines what to use next as the value of the profiling API's ReJITID.
- static ReJITID s_GlobalReJitId;
-
-public:
- // These represent the various states a SharedReJitInfo can be in.
- enum InternalFlags
- {
- // The profiler has requested a ReJit, so we've allocated stuff, but we haven't
- // called back to the profiler to get any info or indicate that the ReJit has
- // started. (This Info can be 'reused' for a new ReJit if the
- // profiler calls RequestRejit again before we transition to the next state.)
- kStateRequested = 0x00000000,
-
- // The CLR has initiated the call to the profiler's GetReJITParameters() callback
- // but it hasn't completed yet. At this point we have to assume the profiler has
- // committed to a specific IL body, even if the CLR doesn't know what it is yet.
- // If the profiler calls RequestRejit we need to allocate a new SharedReJitInfo
- // and call GetReJITParameters() again.
- kStateGettingReJITParameters = 0x00000001,
-
- // We have asked the profiler about this method via ICorProfilerFunctionControl,
- // and have thus stored the IL and codegen flags the profiler specified. Can only
- // transition to kStateReverted from this state.
- kStateActive = 0x00000002,
-
- // The methoddef has been reverted, but not freed yet. It (or its instantiations
- // for generics) *MAY* still be active on the stack someplace or have outstanding
- // memory references.
- kStateReverted = 0x00000003,
-
-
- kStateMask = 0x0000000F,
- };
-
- DWORD m_dwInternalFlags;
-
- // Data
- LPBYTE m_pbIL;
- DWORD m_dwCodegenFlags;
- InstrumentedILOffsetMapping m_instrumentedILMap;
-
-private:
- // This is the value of the profiling API's ReJITID for this particular
- // rejit request.
- const ReJITID m_reJitId;
-
- // Children
- ReJitInfo * m_pInfoList;
-
-public:
- // Constructor
- SharedReJitInfo();
-
- // Intentionally no destructor. SharedReJitInfo and its contents are
- // allocated on a loader heap, so SharedReJitInfo and its contents will be
- // freed when the AD is unloaded.
-
- // Read-Only Identification
- ReJITID GetId() { return m_reJitId; }
-
- void AddMethod(ReJitInfo * pInfo);
-
- void RemoveMethod(ReJitInfo * pInfo);
-
- ReJitInfo * GetMethods() { return m_pInfoList; }
-
- InternalFlags GetState();
-};
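
A hedged sketch of how the four kState* values above are consumed (GetState()
is declared on this struct; the guard function itself is hypothetical):

    // Per the comments on the enum, only an active request (profiler IL and
    // codegen flags already captured) may transition to kStateReverted.
    bool CanTransitionToReverted(SharedReJitInfo* pShared)
    {
        return pShared->GetState() == SharedReJitInfo::kStateActive;
    }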
-
-//---------------------------------------------------------------------------------------
-// One instance of this per rejit request for each MethodDesc*. One SharedReJitInfo
-// corresponds to many ReJitInfos, as the SharedReJitInfo tracks the rejit request for
-// the methodDef token whereas the ReJitInfo tracks the rejit request for each corresponding
-// MethodDesc* (instantiation). Points to actual generated code.
-//
-// In the case of "pre-rejit" (see comment at top of rejit.cpp), a special "placeholder"
-// instance of ReJitInfo is used to "remember" to jmp-stamp a not-yet-jitted-method once
-// it finally gets jitted the first time.
-//
-// Each ReJitManager contains a hash table of ReJitInfo instances, keyed by
-// ReJitManager::m_key.
-//
-// This is used primarily as a structure, so most of its members are left public.
-//
-struct ReJitInfo : public LoaderHeapAllocatedRejitStructure
-{
-public:
- // The size of the code used to jump stamp the prolog
- static const size_t JumpStubSize =
-#if defined(_X86_) || defined(_AMD64_)
- 5;
-#else
-#error "Need to define size of rejit jump-stamp for this platform"
- 1;
-#endif
-
- // Used by PtrSHash template as the key for this ReJitInfo. For regular
- // ReJitInfos, the key is the MethodDesc*. For placeholder ReJitInfos
- // (to facilitate pre-rejit), the key is (Module*, mdMethodDef).
- struct Key
- {
- public:
- enum
- {
- // The key has not yet had its values initialized
- kUninitialized = 0x0,
-
- // The key represents a loaded MethodDesc, and is identified by the m_pMD
- // field
- kMethodDesc = 0x1,
-
- // The key represents a "placeholder" ReJitInfo identified not by loaded
- // MethodDesc, but by the module and metadata token (m_pModule,
- // m_methodDef).
- kMetadataToken = 0x2,
- };
-
- // Storage consists of a discriminated union between MethodDesc* or
- // (Module*, mdMethodDef), with the key type as the discriminator.
- union
- {
- TADDR m_pMD;
- TADDR m_pModule;
- };
- ULONG32 m_methodDef : 28;
- ULONG32 m_keyType : 2;
-
- Key();
- Key(PTR_MethodDesc pMD);
- Key(PTR_Module pModule, mdMethodDef methodDef);
- };
-
- static COUNT_T Hash(Key key);
-
- enum InternalFlags
- {
- // This ReJitInfo is either a placeholder (identified by module and
- // metadata token, rather than loaded MethodDesc) OR this ReJitInfo is
- // identified by a loaded MethodDesc that has been reverted OR has not yet
- // been jump-stamped. In the last case, the time window where this
- // ReJitInfo would stay in kJumpNone is rather small, as
- // RequestReJIT() will immediately cause the originally JITted code to
- // be jump-stamped.
- kJumpNone = 0x00000000,
-
- // This ReJitInfo is identified by a loaded MethodDesc that has been compiled and
- // jump-stamped, with the target being the prestub. The MethodDesc has not yet
- // been rejitted
- kJumpToPrestub = 0x00000001,
-
- // This ReJitInfo is identified by a loaded MethodDesc that has been compiled AND
- // rejitted. The top of the originally JITted code has been jump-stamped, with
- // the target being the latest version of the rejitted code.
- kJumpToRejittedCode = 0x00000002,
-
- kStateMask = 0x0000000F,
- };
-
- Key m_key;
- DWORD m_dwInternalFlags;
-
- // The beginning of the rejitted code
- PCODE m_pCode;
-
- // The parent SharedReJitInfo, which manages the rejit request for all
- // instantiations.
- PTR_SharedReJitInfo const m_pShared;
-
- // My next sibling ReJitInfo for this rejit request (e.g., another
- // generic instantiation of the same method)
- PTR_ReJitInfo m_pNext;
-
- // The originally JITted code that was overwritten with the jmp stamp.
- BYTE m_rgSavedCode[JumpStubSize];
-
-
- ReJitInfo(PTR_MethodDesc pMD, SharedReJitInfo * pShared);
- ReJitInfo(PTR_Module pModule, mdMethodDef methodDef, SharedReJitInfo * pShared);
-
- // Intentionally no destructor. ReJitInfo is allocated on a loader heap,
- // and will be freed (along with its associated SharedReJitInfo) when the
- // AD is unloaded.
-
- Key GetKey();
- PTR_MethodDesc GetMethodDesc();
- void GetModuleAndToken(Module ** ppModule, mdMethodDef * pMethodDef);
- void GetModuleAndTokenRegardlessOfKeyType(Module ** ppModule, mdMethodDef * pMethodDef);
- InternalFlags GetState();
-
- COR_ILMETHOD * GetIL();
-
- HRESULT JumpStampNativeCode(PCODE pCode = NULL);
- HRESULT UndoJumpStampNativeCode(BOOL fEESuspended);
- HRESULT UpdateJumpTarget(BOOL fEESuspended, PCODE pRejittedCode);
- HRESULT UpdateJumpStampHelper(BYTE* pbCode, INT64 i64OldValue, INT64 i64newValue, BOOL fContentionPossible);
-
-
-protected:
- void CommonInit();
- INDEBUG(BOOL CodeIsSaved();)
-};
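
As a small illustration of the discriminated key above, a hypothetical probe
that separates regular ReJitInfos from pre-rejit placeholders:

    // kMetadataToken marks a placeholder: the method is not jitted yet, so
    // only (Module*, mdMethodDef) is available as its identity.
    bool IsPreReJitPlaceholder(ReJitInfo* pInfo)
    {
        return pInfo->GetKey().m_keyType == ReJitInfo::Key::kMetadataToken;
    }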
-
-//---------------------------------------------------------------------------------------
-// Used by the SHash inside ReJitManager which maintains the set of ReJitInfo instances.
-//
-class ReJitInfoTraits : public DefaultSHashTraits<PTR_ReJitInfo>
-{
-public:
-
- // explicitly declare local typedefs for these traits types, otherwise
- // the compiler may get confused
- typedef DefaultSHashTraits<PTR_ReJitInfo> PARENT;
- typedef PARENT::element_t element_t;
- typedef PARENT::count_t count_t;
-
- typedef ReJitInfo::Key key_t;
-
- static key_t GetKey(const element_t &e);
- static BOOL Equals(key_t k1, key_t k2);
- static count_t Hash(key_t k);
- static bool IsNull(const element_t &e);
-};
-
-// RequestRejit and RequestRevert use these batches to accumulate ReJitInfos that need their
-// jump stamps updated
-class ReJitManager;
-struct ReJitManagerJumpStampBatch
-{
- ReJitManagerJumpStampBatch(ReJitManager * pReJitManager) : undoMethods(), preStubMethods()
- {
- LIMITED_METHOD_CONTRACT;
- this->pReJitManager = pReJitManager;
- }
-
- ReJitManager* pReJitManager;
- CDynArray<ReJitInfo *> undoMethods;
- CDynArray<ReJitInfo *> preStubMethods;
-};
-
-class ReJitManagerJumpStampBatchTraits : public DefaultSHashTraits<ReJitManagerJumpStampBatch *>
-{
-public:
-
- // explicitly declare local typedefs for these traits types, otherwise
- // the compiler may get confused
- typedef DefaultSHashTraits<ReJitManagerJumpStampBatch *> PARENT;
- typedef PARENT::element_t element_t;
- typedef PARENT::count_t count_t;
-
- typedef ReJitManager * key_t;
-
- static key_t GetKey(const element_t &e)
- {
- return e->pReJitManager;
- }
-
- static BOOL Equals(key_t k1, key_t k2)
- {
- return (k1 == k2);
- }
-
- static count_t Hash(key_t k)
- {
- return (count_t)k;
- }
-
- static bool IsNull(const element_t &e)
- {
- return (e == NULL);
- }
-};
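
These traits exist only to plug the batch type into the CLR's SHash template;
a hedged sketch of the lookup-or-add pattern they enabled (the helper function
is hypothetical):

    typedef SHash<ReJitManagerJumpStampBatchTraits> BatchHash;

    // GetKey/Hash/Equals from the traits class drive the lookup by manager.
    ReJitManagerJumpStampBatch* GetOrAddBatch(BatchHash* pBatches, ReJitManager* pMgr)
    {
        ReJitManagerJumpStampBatch* pBatch = pBatches->Lookup(pMgr);
        if (pBatch == NULL)
        {
            pBatch = new ReJitManagerJumpStampBatch(pMgr);
            pBatches->Add(pBatch);
        }
        return pBatch;
    }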
-
-struct ReJitReportErrorWorkItem
-{
- Module* pModule;
- mdMethodDef methodDef;
- MethodDesc* pMethodDesc;
- HRESULT hrStatus;
-};
-
-
#endif // FEATURE_REJIT
-//
-// These holders are used by runtime code that is making new code
-// available for execution, either by publishing jitted code
-// or restoring NGEN code. It ensures the publishing is synchronized
-// with rejit requests
-//
-class ReJitPublishMethodHolder
-{
-public:
-#if !defined(FEATURE_REJIT) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
- ReJitPublishMethodHolder(MethodDesc* pMethod, PCODE pCode) { }
-#else
- ReJitPublishMethodHolder(MethodDesc* pMethod, PCODE pCode);
- ~ReJitPublishMethodHolder();
-#endif
-
-private:
-#if defined(FEATURE_REJIT)
- MethodDesc * m_pMD;
- HRESULT m_hr;
-#endif
-};
-class ReJitPublishMethodTableHolder
-{
-public:
-#if !defined(FEATURE_REJIT) || defined(DACCESS_COMPILE) || defined(CROSSGEN_COMPILE)
- ReJitPublishMethodTableHolder(MethodTable* pMethodTable) { }
-#else
- ReJitPublishMethodTableHolder(MethodTable* pMethodTable);
- ~ReJitPublishMethodTableHolder();
-#endif
-
-private:
-#if defined(FEATURE_REJIT)
- MethodTable* m_pMethodTable;
- CDynArray<ReJitReportErrorWorkItem> m_errors;
-#endif
-};
//---------------------------------------------------------------------------------------
// The big honcho. One of these per AppDomain, plus one for the
@@ -420,55 +81,23 @@ class ReJitManager
friend class ClrDataAccess;
friend class DacDbiInterfaceImpl;
- //I would have preferred to make these inner classes, but
- //then I can't friend them from crst easily.
- friend class ReJitPublishMethodHolder;
- friend class ReJitPublishMethodTableHolder;
-
private:
#ifdef FEATURE_REJIT
- // Hash table mapping MethodDesc* (or (ModuleID, mdMethodDef)) to its
- // ReJitInfos. One key may map to multiple ReJitInfos if there have been
- // multiple rejit requests made for the same MD. See
- // code:ReJitManager::ReJitManager#Invariants for more information.
- typedef SHash<ReJitInfoTraits> ReJitInfoHash;
-
// One global crst (for the entire CLR instance) to synchronize
// cross-ReJitManager operations, such as batch calls to RequestRejit and
// RequestRevert (which modify multiple ReJitManager instances).
static CrstStatic s_csGlobalRequest;
- // All The ReJitInfos (and their linked SharedReJitInfos) for this domain.
- ReJitInfoHash m_table;
-
- // The crst that synchronizes the data in m_table, including
- // adding/removing to m_table, as well as state changes made to
- // individual ReJitInfos & SharedReJitInfos in m_table.
- CrstExplicitInit m_crstTable;
-
#endif //FEATURE_REJIT
public:
- // The ReJITManager takes care of grabbing its m_crstTable when necessary. However,
- // for clients who need to do this explicitly (like ETW rundown), this holder may be
- // used.
- class TableLockHolder
-#ifdef FEATURE_REJIT
- : public CrstHolder
-#endif
- {
- public:
- TableLockHolder(ReJitManager * pReJitManager);
- };
static void InitStatic();
static BOOL IsReJITEnabled();
- static void OnAppDomainExit(AppDomain * pAppDomain);
-
static HRESULT RequestReJIT(
ULONG cFunctions,
ModuleID rgModuleIDs[],
@@ -480,85 +109,56 @@ public:
mdMethodDef rgMethodDefs[],
HRESULT rgHrStatuses[]);
- static PCODE DoReJitIfNecessary(PTR_MethodDesc pMD); // Invokes the jit, or returns previously rejitted code
-
- static void DoJumpStampForAssemblyIfNecessary(Assembly* pAssemblyToSearch);
-
- static DWORD GetCurrentReJitFlags(PTR_MethodDesc pMD);
-
- ReJitManager();
-
- void PreInit(BOOL fSharedDomain);
-
- ReJITID GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart);
-
- ReJITID GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart);
+ static HRESULT ConfigureILCodeVersion(ILCodeVersion ilCodeVersion);
+ static CORJIT_FLAGS JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags);
- PCODE GetCodeStart(PTR_MethodDesc pMD, ReJITID reJitId);
-
- HRESULT GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[]);
+ static ReJITID GetReJitId(PTR_MethodDesc pMD, PCODE pCodeStart);
+ static ReJITID GetReJitIdNoLock(PTR_MethodDesc pMD, PCODE pCodeStart);
+ static HRESULT GetReJITIDs(PTR_MethodDesc pMD, ULONG cReJitIds, ULONG * pcReJitIds, ReJITID reJitIds[]);
#ifdef FEATURE_REJIT
-
- INDEBUG(BOOL IsTableCrstOwnedByCurrentThread());
+#ifndef DACCESS_COMPILE
+ static void ReportReJITError(CodeVersionManager::CodePublishError* pErrorRecord);
+ static void ReportReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus);
+#endif
private:
- static HRESULT IsMethodSafeForReJit(PTR_MethodDesc pMD);
- static void ReportReJITError(ReJitReportErrorWorkItem* pErrorRecord);
- static void ReportReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus);
- static HRESULT AddReJITError(ReJitInfo* pReJitInfo, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors);
- static HRESULT AddReJITError(Module* pModule, mdMethodDef methodDef, MethodDesc* pMD, HRESULT hrStatus, CDynArray<ReJitReportErrorWorkItem> * pErrors);
- HRESULT BatchUpdateJumpStamps(CDynArray<ReJitInfo *> * pUndoMethods, CDynArray<ReJitInfo *> * pPreStubMethods, CDynArray<ReJitReportErrorWorkItem> * pErrors);
- PCODE DoReJitIfNecessaryWorker(PTR_MethodDesc pMD); // Invokes the jit, or returns previously rejitted code
- DWORD GetCurrentReJitFlagsWorker(PTR_MethodDesc pMD);
+ static HRESULT UpdateActiveILVersions(
+ ULONG cFunctions,
+ ModuleID rgModuleIDs[],
+ mdMethodDef rgMethodDefs[],
+ HRESULT rgHrStatuses[],
+ BOOL fIsRevert);
- HRESULT MarkAllInstantiationsForReJit(
- SharedReJitInfo * pSharedForAllGenericInstantiations,
- AppDomain * pAppDomainToSearch,
- PTR_Module pModuleContainingGenericDefinition,
- mdMethodDef methodDef,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors);
-
- INDEBUG(BaseDomain * m_pDomain;)
- INDEBUG(void Dump(LPCSTR szIntroText);)
- INDEBUG(void AssertRestOfEntriesAreReverted(
- ReJitInfoHash::KeyIterator iter,
- ReJitInfoHash::KeyIterator end);)
-
-
- HRESULT DoJumpStampIfNecessary(MethodDesc* pMD, PCODE pCode);
- HRESULT MarkForReJit(PTR_MethodDesc pMD, SharedReJitInfo * pSharedToReuse, ReJitManagerJumpStampBatch* pJumpStampBatch, CDynArray<ReJitReportErrorWorkItem> * pRejitErrors, SharedReJitInfo ** ppSharedUsed);
- HRESULT MarkForReJit(PTR_Module pModule, mdMethodDef methodDef, ReJitManagerJumpStampBatch* pJumpStampBatch, CDynArray<ReJitReportErrorWorkItem> * pRejitErrors, SharedReJitInfo ** ppSharedUsed);
- HRESULT MarkForReJitHelper(
- PTR_MethodDesc pMD,
- PTR_Module pModule,
- mdMethodDef methodDef,
- SharedReJitInfo * pSharedToReuse,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
- /* out */ SharedReJitInfo ** ppSharedUsed);
- HRESULT AddNewReJitInfo(
- PTR_MethodDesc pMD,
+ struct CodeActivationBatch
+ {
+ CodeActivationBatch(CodeVersionManager * pCodeVersionManager) :
+ m_pCodeVersionManager(pCodeVersionManager)
+ {}
+ CodeVersionManager* m_pCodeVersionManager;
+ CDynArray<ILCodeVersion> m_methodsToActivate;
+ };
+
+ class CodeActivationBatchTraits : public DefaultSHashTraits<CodeActivationBatch *>
+ {
+ public:
+ typedef DefaultSHashTraits<CodeActivationBatch *> PARENT;
+ typedef PARENT::element_t element_t;
+ typedef PARENT::count_t count_t;
+ typedef CodeVersionManager * key_t;
+ static key_t GetKey(const element_t &e) { return e->m_pCodeVersionManager; }
+ static BOOL Equals(key_t k1, key_t k2) { return (k1 == k2); }
+ static count_t Hash(key_t k) { return (count_t)k; }
+ static bool IsNull(const element_t &e) { return (e == NULL); }
+ };
+
+ static HRESULT BindILVersion(
+ CodeVersionManager* pCodeVersionManager,
PTR_Module pModule,
mdMethodDef methodDef,
- SharedReJitInfo * pShared,
- ReJitInfo ** ppInfo);
- HRESULT RequestRevertByToken(PTR_Module pModule, mdMethodDef methodDef);
- PTR_ReJitInfo FindReJitInfo(PTR_MethodDesc pMD, PCODE pCodeStart, ReJITID reJitId);
- PTR_ReJitInfo FindNonRevertedReJitInfo(PTR_Module pModule, mdMethodDef methodDef);
- PTR_ReJitInfo FindNonRevertedReJitInfo(PTR_MethodDesc pMD);
- PTR_ReJitInfo FindNonRevertedReJitInfoHelper(PTR_MethodDesc pMD, PTR_Module pModule, mdMethodDef methodDef);
- ReJitInfo* FindPreReJittedReJitInfo(ReJitInfoHash::KeyIterator beginIter, ReJitInfoHash::KeyIterator endIter);
- HRESULT Revert(SharedReJitInfo * pShared, ReJitManagerJumpStampBatch* pJumpStampBatch);
- PCODE DoReJit(ReJitInfo * pInfo);
- ReJitInfoHash::KeyIterator GetBeginIterator(PTR_MethodDesc pMD);
- ReJitInfoHash::KeyIterator GetEndIterator(PTR_MethodDesc pMD);
- ReJitInfoHash::KeyIterator GetBeginIterator(PTR_Module pModule, mdMethodDef methodDef);
- ReJitInfoHash::KeyIterator GetEndIterator(PTR_Module pModule, mdMethodDef methodDef);
- void RemoveReJitInfosFromDomain(AppDomain * pAppDomain);
+ ILCodeVersion *pILCodeVersion);
#endif // FEATURE_REJIT
diff --git a/src/vm/rejit.inl b/src/vm/rejit.inl
index 8662eeaedf..3c42bcea00 100644
--- a/src/vm/rejit.inl
+++ b/src/vm/rejit.inl
@@ -13,149 +13,6 @@
#ifdef FEATURE_REJIT
-inline SharedReJitInfo::InternalFlags SharedReJitInfo::GetState()
-{
- LIMITED_METHOD_CONTRACT;
-
- return (InternalFlags)(m_dwInternalFlags & kStateMask);
-}
-
-inline ReJitInfo::ReJitInfo(PTR_MethodDesc pMD, SharedReJitInfo * pShared) :
- m_key(pMD),
- m_pShared(pShared)
-{
- LIMITED_METHOD_CONTRACT;
-
- CommonInit();
-}
-
-inline ReJitInfo::ReJitInfo(PTR_Module pModule, mdMethodDef methodDef, SharedReJitInfo * pShared) :
- m_key(pModule, methodDef),
- m_pShared(pShared)
-{
- LIMITED_METHOD_CONTRACT;
-
- CommonInit();
-}
-
-inline ReJitInfo::Key::Key() :
- m_pMD(NULL),
- m_methodDef(mdTokenNil),
- m_keyType(kUninitialized)
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-inline ReJitInfo::Key::Key(PTR_MethodDesc pMD) :
- m_pMD(dac_cast<TADDR>(pMD)),
- m_methodDef(mdTokenNil),
- m_keyType(kMethodDesc)
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-inline ReJitInfo::Key::Key(PTR_Module pModule, mdMethodDef methodDef) :
- m_pModule(dac_cast<TADDR>(pModule)),
- m_methodDef(methodDef),
- m_keyType(kMetadataToken)
-{
- LIMITED_METHOD_CONTRACT;
-}
-
-inline ReJitInfo::Key ReJitInfo::GetKey()
-{
- LIMITED_METHOD_CONTRACT;
-
- return m_key;
-}
-
-inline ReJitInfo::InternalFlags ReJitInfo::GetState()
-{
- LIMITED_METHOD_CONTRACT;
-
- return (InternalFlags)(m_dwInternalFlags & kStateMask);
-}
-
-inline PTR_MethodDesc ReJitInfo::GetMethodDesc()
-{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(m_key.m_keyType == Key::kMethodDesc);
- return PTR_MethodDesc(m_key.m_pMD);
-}
-
-inline void ReJitInfo::GetModuleAndToken(Module ** ppModule, mdMethodDef * pMethodDef)
-{
- LIMITED_METHOD_CONTRACT;
-
- _ASSERTE(ppModule != NULL);
- _ASSERTE(pMethodDef != NULL);
- _ASSERTE(m_key.m_keyType == Key::kMetadataToken);
-
- *ppModule = PTR_Module(m_key.m_pModule);
- *pMethodDef = (mdMethodDef) m_key.m_methodDef;
-}
-
-#ifdef _DEBUG
-inline BOOL ReJitInfo::CodeIsSaved()
-{
- LIMITED_METHOD_CONTRACT;
-
- for (size_t i=0; i < sizeof(m_rgSavedCode); i++)
- {
- if (m_rgSavedCode[i] != 0)
- return TRUE;
- }
- return FALSE;
-}
-#endif //_DEBUG
-
-// static
-inline ReJitInfoTraits::key_t ReJitInfoTraits::GetKey(const element_t &e)
-{
- LIMITED_METHOD_CONTRACT;
-
- return e->GetKey();
-}
-
-// static
-inline BOOL ReJitInfoTraits::Equals(key_t k1, key_t k2)
-{
- LIMITED_METHOD_CONTRACT;
-
- // Always use the values of the TADDRs of the MethodDesc * and Module * when treating
- // them as lookup keys into the SHash.
-
- if (k1.m_keyType == ReJitInfo::Key::kMethodDesc)
- {
- return ((k2.m_keyType == ReJitInfo::Key::kMethodDesc) &&
- (dac_cast<TADDR>(PTR_MethodDesc(k1.m_pMD)) ==
- dac_cast<TADDR>(PTR_MethodDesc(k2.m_pMD))));
- }
-
- _ASSERTE(k1.m_keyType == ReJitInfo::Key::kMetadataToken);
- return ((k2.m_keyType == ReJitInfo::Key::kMetadataToken) &&
- (dac_cast<TADDR>(PTR_Module(k1.m_pModule)) ==
- dac_cast<TADDR>(PTR_Module(k2.m_pModule))) &&
- (k1.m_methodDef == k2.m_methodDef));
-}
-
-// static
-inline ReJitInfoTraits::count_t ReJitInfoTraits::Hash(key_t k)
-{
- LIMITED_METHOD_CONTRACT;
-
- return ReJitInfo::Hash(k);
-}
-
-// static
-inline bool ReJitInfoTraits::IsNull(const element_t &e)
-{
- LIMITED_METHOD_CONTRACT;
-
- return e == NULL;
-}
-
// static
inline void ReJitManager::InitStatic()
{
@@ -172,92 +29,9 @@ inline BOOL ReJitManager::IsReJITEnabled()
return CORProfilerEnableRejit();
}
-inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetBeginIterator(PTR_MethodDesc pMD)
-{
- LIMITED_METHOD_CONTRACT;
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
- return m_table.Begin(ReJitInfo::Key(pMD));
-}
-
-inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetEndIterator(PTR_MethodDesc pMD)
-{
- LIMITED_METHOD_CONTRACT;
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
- return m_table.End(ReJitInfo::Key(pMD));
-}
-
-inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetBeginIterator(PTR_Module pModule, mdMethodDef methodDef)
-{
- LIMITED_METHOD_CONTRACT;
#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
- return m_table.Begin(ReJitInfo::Key(pModule, methodDef));
-}
-
-inline ReJitManager::ReJitInfoHash::KeyIterator ReJitManager::GetEndIterator(PTR_Module pModule, mdMethodDef methodDef)
-{
- LIMITED_METHOD_CONTRACT;
-#ifndef DACCESS_COMPILE
- _ASSERTE(m_crstTable.OwnedByCurrentThread());
-#endif
- return m_table.End(ReJitInfo::Key(pModule, methodDef));
-}
-
-#ifdef _DEBUG
-inline BOOL ReJitManager::IsTableCrstOwnedByCurrentThread()
-{
- LIMITED_METHOD_CONTRACT;
-
- return m_crstTable.OwnedByCurrentThread();
-}
-#endif //_DEBUG
-
-
-inline HRESULT ReJitManager::MarkForReJit(
- PTR_MethodDesc pMD,
- SharedReJitInfo * pSharedToReuse,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
- /* out */ SharedReJitInfo ** ppSharedUsed)
-{
- WRAPPER_NO_CONTRACT;
-
- return MarkForReJitHelper(pMD, NULL, mdTokenNil, pSharedToReuse, pJumpStampBatch, pRejitErrors, ppSharedUsed);
-}
-
-inline HRESULT ReJitManager::MarkForReJit(
- PTR_Module pModule,
- mdMethodDef methodDef,
- ReJitManagerJumpStampBatch* pJumpStampBatch,
- CDynArray<ReJitReportErrorWorkItem> * pRejitErrors,
- /* out */ SharedReJitInfo ** ppSharedUsed)
-{
- WRAPPER_NO_CONTRACT;
-
- return MarkForReJitHelper(NULL, pModule, methodDef, NULL, pJumpStampBatch, pRejitErrors, ppSharedUsed);
-}
-
-inline PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfo(PTR_Module pModule, mdMethodDef methodDef)
-{
- WRAPPER_NO_CONTRACT;
-
- return FindNonRevertedReJitInfoHelper(NULL, pModule, methodDef);
-}
-
-inline PTR_ReJitInfo ReJitManager::FindNonRevertedReJitInfo(PTR_MethodDesc pMD)
-{
- WRAPPER_NO_CONTRACT;
-
- return FindNonRevertedReJitInfoHelper(pMD, NULL, NULL);
-}
-
//static
-inline void ReJitManager::ReportReJITError(ReJitReportErrorWorkItem* pErrorRecord)
+inline void ReJitManager::ReportReJITError(CodeVersionManager::CodePublishError* pErrorRecord)
{
CONTRACTL
{
@@ -298,14 +72,7 @@ inline void ReJitManager::ReportReJITError(Module* pModule, mdMethodDef methodDe
}
#endif // PROFILING_SUPPORTED
}
-
-inline ReJitManager::TableLockHolder::TableLockHolder(ReJitManager * pReJitManager)
-#ifdef FEATURE_REJIT
- : CrstHolder(&pReJitManager->m_crstTable)
-#endif // FEATURE_REJIT
-{
- WRAPPER_NO_CONTRACT;
-}
+#endif // DACCESS_COMPILE
#else // FEATURE_REJIT
@@ -313,32 +80,16 @@ inline ReJitManager::TableLockHolder::TableLockHolder(ReJitManager * pReJitManag
// stubs so the rest of the VM doesn't have to be littered with #ifdef FEATURE_REJIT
// static
-inline PCODE ReJitManager::DoReJitIfNecessary(PTR_MethodDesc)
-{
- return NULL;
-}
-
-// static
inline BOOL ReJitManager::IsReJITEnabled()
{
return FALSE;
}
-// static
-inline DWORD ReJitManager::GetCurrentReJitFlags(PTR_MethodDesc)
-{
- return 0;
-}
-
// static
inline void ReJitManager::InitStatic()
{
}
-inline ReJitManager::TableLockHolder::TableLockHolder(ReJitManager *)
-{
-}
-
#endif // FEATURE_REJIT
diff --git a/src/vm/runtimehandles.cpp b/src/vm/runtimehandles.cpp
index d3dad5a596..952be6206e 100644
--- a/src/vm/runtimehandles.cpp
+++ b/src/vm/runtimehandles.cpp
@@ -28,7 +28,6 @@
#include "contractimpl.h"
#include "dynamicmethod.h"
#include "peimagelayout.inl"
-#include "security.h"
#include "eventtrace.h"
#include "invokeutil.h"
@@ -135,9 +134,7 @@ static BOOL CheckCAVisibilityFromDecoratedType(MethodTable* pCAMT, MethodDesc* p
dwAttr,
pCACtor,
NULL,
- *AccessCheckOptions::s_pNormalAccessChecks,
- FALSE,
- FALSE);
+ *AccessCheckOptions::s_pNormalAccessChecks);
}
BOOL QCALLTYPE RuntimeMethodHandle::IsCAVisibleFromDecoratedType(
@@ -970,6 +967,24 @@ FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsInterface, ReflectClassBaseObject *pTy
}
FCIMPLEND;
+
+FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::IsByRefLike, ReflectClassBaseObject *pTypeUNSAFE)
+{
+ CONTRACTL {
+ FCALL_CHECK;
+ }
+ CONTRACTL_END;
+
+ REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
+
+ _ASSERTE(refType != NULL);
+
+ TypeHandle typeHandle = refType->GetType();
+
+ FC_RETURN_BOOL(typeHandle.IsByRefLike());
+}
+FCIMPLEND
+
BOOL
QCALLTYPE
RuntimeTypeHandle::IsVisible(
@@ -996,37 +1011,6 @@ RuntimeTypeHandle::IsVisible(
return fIsExternallyVisible;
} // RuntimeTypeHandle::IsVisible
-FCIMPL1(FC_BOOL_RET, RuntimeTypeHandle::HasProxyAttribute, ReflectClassBaseObject *pTypeUNSAFE) {
- CONTRACTL {
- FCALL_CHECK;
- }
- CONTRACTL_END;
-
- REFLECTCLASSBASEREF refType = (REFLECTCLASSBASEREF)ObjectToOBJECTREF(pTypeUNSAFE);
-
- if (refType == NULL)
- FCThrowRes(kArgumentNullException, W("Arg_InvalidHandle"));
-
- TypeHandle typeHandle = refType->GetType();
-
- // TODO: Justify this
- if (typeHandle.IsGenericVariable())
- FC_RETURN_BOOL(FALSE);
-
- if (typeHandle.IsTypeDesc()) {
- if (!typeHandle.IsArray())
- FC_RETURN_BOOL(FALSE);
- }
-
- MethodTable* pMT= typeHandle.GetMethodTable();
-
- if (!pMT)
- FCThrowRes(kArgumentException, W("Arg_InvalidHandle"));
-
- FC_RETURN_BOOL(pMT->GetClass()->HasRemotingProxyAttribute());
-}
-FCIMPLEND
-
FCIMPL2(FC_BOOL_RET, RuntimeTypeHandle::IsComObject, ReflectClassBaseObject *pTypeUNSAFE, CLR_BOOL isGenericCOM) {
#ifdef FEATURE_COMINTEROP
CONTRACTL {
@@ -2216,6 +2200,14 @@ FCIMPL1(FC_BOOL_RET, RuntimeMethodHandle::IsGenericMethodDefinition, MethodDesc
}
FCIMPLEND
+FCIMPL1(INT32, RuntimeMethodHandle::GetGenericParameterCount, MethodDesc * pMethod)
+{
+ FCALL_CONTRACT;
+
+ return pMethod->GetNumGenericMethodArgs();
+}
+FCIMPLEND
+
FCIMPL1(FC_BOOL_RET, RuntimeMethodHandle::IsDynamicMethod, MethodDesc * pMethod)
{
FCALL_CONTRACT;
@@ -3115,3 +3107,4 @@ void QCALLTYPE RuntimeMethodHandle::GetCallerType(QCall::StackCrawlMarkHandle pS
return;
}
+
diff --git a/src/vm/runtimehandles.h b/src/vm/runtimehandles.h
index fc18d6f65c..8978e20946 100644
--- a/src/vm/runtimehandles.h
+++ b/src/vm/runtimehandles.h
@@ -126,8 +126,9 @@ public:
// Static method on RuntimeTypeHandle
static FCDECL1(Object*, Allocate, ReflectClassBaseObject *refType) ; //A.CI work
- static FCDECL4(Object*, CreateInstance, ReflectClassBaseObject* refThisUNSAFE,
+ static FCDECL5(Object*, CreateInstance, ReflectClassBaseObject* refThisUNSAFE,
CLR_BOOL publicOnly,
+ CLR_BOOL wrapExceptions,
CLR_BOOL *pbCanBeCached,
MethodDesc** pConstructor);
@@ -193,11 +194,11 @@ public:
static FCDECL1(ReflectClassBaseObject*, GetDeclaringType, ReflectClassBaseObject* pType);
static FCDECL1(FC_BOOL_RET, IsValueType, ReflectClassBaseObject* pType);
static FCDECL1(FC_BOOL_RET, IsInterface, ReflectClassBaseObject* pType);
+ static FCDECL1(FC_BOOL_RET, IsByRefLike, ReflectClassBaseObject* pType);
static
BOOL QCALLTYPE IsVisible(EnregisteredTypeHandle pTypeHandle);
- static FCDECL1(FC_BOOL_RET, HasProxyAttribute, ReflectClassBaseObject *pType);
static FCDECL2(FC_BOOL_RET, IsComObject, ReflectClassBaseObject *pType, CLR_BOOL isGenericCOM);
static FCDECL2(FC_BOOL_RET, CanCastTo, ReflectClassBaseObject *pType, ReflectClassBaseObject *pTarget);
static FCDECL2(FC_BOOL_RET, IsInstanceOfType, ReflectClassBaseObject *pType, Object *object);
@@ -266,7 +267,7 @@ class RuntimeMethodHandle {
public:
static FCDECL1(ReflectMethodObject*, GetCurrentMethod, StackCrawlMark* stackMark);
- static FCDECL4(Object*, InvokeMethod, Object *target, PTRArray *objs, SignatureNative* pSig, CLR_BOOL fConstructor);
+ static FCDECL5(Object*, InvokeMethod, Object *target, PTRArray *objs, SignatureNative* pSig, CLR_BOOL fConstructor, CLR_BOOL fWrapExceptions);
struct StreamingContextData {
Object * additionalContext; // additionalContex was changed from OBJECTREF to Object to avoid having a
@@ -338,6 +339,9 @@ public:
static
void QCALLTYPE StripMethodInstantiation(MethodDesc * pMethod, QCall::ObjectHandleOnStack refMethod);
+ static
+ FCDECL1(INT32, GetGenericParameterCount, MethodDesc * pMethod);
+
// see comment in the cpp file
static FCDECL3(MethodDesc*, GetStubIfNeeded, MethodDesc *pMethod, ReflectClassBaseObject *pType, PtrArray* instArray);
static FCDECL2(MethodDesc*, GetMethodFromCanonical, MethodDesc *pMethod, PTR_ReflectClassBaseObject pType);
diff --git a/src/vm/sampleprofiler.cpp b/src/vm/sampleprofiler.cpp
index e4721577ae..4a858e9b4a 100644
--- a/src/vm/sampleprofiler.cpp
+++ b/src/vm/sampleprofiler.cpp
@@ -13,7 +13,7 @@
Volatile<BOOL> SampleProfiler::s_profilingEnabled = false;
Thread* SampleProfiler::s_pSamplingThread = NULL;
-const GUID SampleProfiler::s_providerID = {0x3c530d44,0x97ae,0x513a,{0x1e,0x6d,0x78,0x3e,0x8f,0x8e,0x03,0xa9}}; // {3c530d44-97ae-513a-1e6d-783e8f8e03a9}
+const WCHAR* SampleProfiler::s_providerName = W("Microsoft-DotNETCore-SampleProfiler");
EventPipeProvider* SampleProfiler::s_pEventPipeProvider = NULL;
EventPipeEvent* SampleProfiler::s_pThreadTimeEvent = NULL;
BYTE* SampleProfiler::s_pPayloadExternal = NULL;
@@ -36,7 +36,7 @@ void SampleProfiler::Enable()
if(s_pEventPipeProvider == NULL)
{
- s_pEventPipeProvider = EventPipe::CreateProvider(s_providerID);
+ s_pEventPipeProvider = EventPipe::CreateProvider(SL(s_providerName));
s_pThreadTimeEvent = s_pEventPipeProvider->AddEvent(
0, /* eventID */
0, /* keywords */
diff --git a/src/vm/sampleprofiler.h b/src/vm/sampleprofiler.h
index 02eb6b39cd..51290b4d9c 100644
--- a/src/vm/sampleprofiler.h
+++ b/src/vm/sampleprofiler.h
@@ -50,7 +50,7 @@ class SampleProfiler
static Thread *s_pSamplingThread;
// The provider and event emitted by the profiler.
- static const GUID s_providerID;
+ static const WCHAR* s_providerName;
static EventPipeProvider *s_pEventPipeProvider;
static EventPipeEvent *s_pThreadTimeEvent;
diff --git a/src/vm/security.cpp b/src/vm/security.cpp
deleted file mode 100644
index 7a6c8b82ea..0000000000
--- a/src/vm/security.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-//
-
-#include "common.h"
-
-#include "security.h"
-
-//
-// The methods in this file have nothing to do with security; they historically lived in the security subsystem.
-// TODO: Move them to a more appropriate place.
-//
-
-void Security::CopyByteArrayToEncoding(IN U1ARRAYREF* pArray, OUT PBYTE* ppbData, OUT DWORD* pcbData)
-{
- CONTRACTL {
- THROWS;
- GC_NOTRIGGER;
- MODE_COOPERATIVE;
- PRECONDITION(CheckPointer(pArray));
- PRECONDITION(CheckPointer(ppbData));
- PRECONDITION(CheckPointer(pcbData));
- PRECONDITION(*pArray != NULL);
- } CONTRACTL_END;
-
- DWORD size = (DWORD) (*pArray)->GetNumComponents();
- *ppbData = new BYTE[size];
- *pcbData = size;
-
- CopyMemory(*ppbData, (*pArray)->GetDirectPointerToNonObjectElements(), size);
-}
-
-void Security::CopyEncodingToByteArray(IN PBYTE pbData, IN DWORD cbData, IN OBJECTREF* pArray)
-{
- CONTRACTL {
- THROWS;
- GC_TRIGGERS;
- MODE_COOPERATIVE;
- } CONTRACTL_END;
-
- U1ARRAYREF pObj;
- _ASSERTE(pArray);
-
- pObj = (U1ARRAYREF)AllocatePrimitiveArray(ELEMENT_TYPE_U1,cbData);
- memcpyNoGCRefs(pObj->m_Array, pbData, cbData);
- *pArray = (OBJECTREF) pObj;
-}
diff --git a/src/vm/security.h b/src/vm/security.h
deleted file mode 100644
index fa4840998e..0000000000
--- a/src/vm/security.h
+++ /dev/null
@@ -1,93 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-//
-
-#ifndef __security_h__
-#define __security_h__
-
-//
-// Stubbed out implementation of security subsystem
-// TODO: Eliminate this file
-//
-
-enum SecurityStackWalkType
-{
- SSWT_DECLARATIVE_DEMAND = 1,
- SSWT_IMPERATIVE_DEMAND = 2,
- SSWT_DEMAND_FROM_NATIVE = 3,
- SSWT_IMPERATIVE_ASSERT = 4,
- SSWT_DENY_OR_PERMITONLY = 5,
- SSWT_LATEBOUND_LINKDEMAND = 6,
- SSWT_COUNT_OVERRIDES = 7,
- SSWT_GET_ZONE_AND_URL = 8,
-};
-
-// special flags
-#define SECURITY_UNMANAGED_CODE 0
-#define SECURITY_SKIP_VER 1
-#define REFLECTION_TYPE_INFO 2
-#define SECURITY_ASSERT 3
-#define REFLECTION_MEMBER_ACCESS 4
-#define SECURITY_SERIALIZATION 5
-#define REFLECTION_RESTRICTED_MEMBER_ACCESS 6
-#define SECURITY_FULL_TRUST 7
-#define SECURITY_BINDING_REDIRECTS 8
-
-// Ultimately this will become the only interface through
-// which the VM will access security code.
-
-namespace Security
-{
- inline BOOL IsTransparencyEnforcementEnabled() { return false; }
-
- inline BOOL CanCallUnmanagedCode(Module *pModule) { return true; }
-
-#ifndef DACCESS_COMPILE
- inline BOOL CanTailCall(MethodDesc* pMD) { return true; }
- inline BOOL CanHaveRVA(Assembly * pAssembly) { return true; }
- inline BOOL CanAccessNonVerifiableExplicitField(MethodDesc* pMD) { return true; }
- inline BOOL CanSkipVerification(MethodDesc * pMethod) { return true; }
-#endif
-
- inline BOOL CanSkipVerification(DomainAssembly * pAssembly) { return true; }
-
- // ----------------------------------------
- // SecurityAttributes
- // ----------------------------------------
-
- void CopyByteArrayToEncoding(IN U1ARRAYREF* pArray, OUT PBYTE* pbData, OUT DWORD* cbData);
- void CopyEncodingToByteArray(IN PBYTE pbData, IN DWORD cbData, IN OBJECTREF* pArray);
-
- inline void SpecialDemand(SecurityStackWalkType eType, DWORD whatPermission) { }
-
- // Transparency checks
- inline BOOL IsMethodTransparent(MethodDesc * pMD) { return false; }
- inline BOOL IsMethodCritical(MethodDesc * pMD) { return true; }
- inline BOOL IsMethodSafeCritical(MethodDesc * pMD) { return false; }
-
- inline BOOL IsTypeCritical(MethodTable *pMT) { return true; }
- inline BOOL IsTypeSafeCritical(MethodTable *pMT) { return false; }
- inline BOOL IsTypeTransparent(MethodTable * pMT) { return false; }
- inline BOOL IsTypeAllTransparent(MethodTable * pMT) { return false; }
-
- inline BOOL IsFieldTransparent(FieldDesc * pFD) { return false; }
- inline BOOL IsFieldCritical(FieldDesc * pFD) { return true; }
- inline BOOL IsFieldSafeCritical(FieldDesc * pFD) { return false; }
-
- inline BOOL IsTokenTransparent(Module* pModule, mdToken token) { return false; }
-
- inline BOOL CheckCriticalAccess(AccessCheckContext* pContext,
- MethodDesc* pOptionalTargetMethod = NULL,
- FieldDesc* pOptionalTargetField = NULL,
- MethodTable * pOptionalTargetType = NULL)
- {
- return true;
- }
-
- inline void CheckLinkDemandAgainstAppDomain(MethodDesc *pMD)
- {
- }
-};
-
-#endif
diff --git a/src/vm/siginfo.cpp b/src/vm/siginfo.cpp
index 30dcf0f1ad..40a55cb6f0 100644
--- a/src/vm/siginfo.cpp
+++ b/src/vm/siginfo.cpp
@@ -18,7 +18,6 @@
#include "field.h"
#include "eeconfig.h"
#include "runtimehandles.h" // for SignatureNative
-#include "security.h" // for CanSkipVerification
#include "winwrap.h"
#include <formattype.h>
#include "sigbuilder.h"
@@ -1531,12 +1530,11 @@ TypeHandle SigPointer::GetTypeHandleThrowing(
if (typFromSigIsClass != typLoadedIsClass)
{
- if((pModule->GetMDImport()->GetMetadataStreamVersion() != MD_STREAM_VER_1X)
- || !Security::CanSkipVerification(pModule->GetDomainAssembly()))
+ if (pModule->GetMDImport()->GetMetadataStreamVersion() != MD_STREAM_VER_1X)
{
- pOrigModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(),
- typeToken,
- BFA_CLASSLOAD_VALUETYPEMISMATCH);
+ pOrigModule->GetAssembly()->ThrowTypeLoadException(pModule->GetMDImport(),
+ typeToken,
+ BFA_CLASSLOAD_VALUETYPEMISMATCH);
}
}
}
diff --git a/src/vm/stdinterfaces.cpp b/src/vm/stdinterfaces.cpp
index 34ba39019e..fa2d2a7b23 100644
--- a/src/vm/stdinterfaces.cpp
+++ b/src/vm/stdinterfaces.cpp
@@ -1262,15 +1262,12 @@ Dispatch_GetIDsOfNames(IDispatch* pDisp, REFIID riid, __in_ecount(cNames) OLECHA
if (pCMT->HasInvisibleParent())
return E_NOTIMPL;
- // Use the right implementation based on the flags in the ComMethodTable and ComCallWrapperTemplate
- if (!pCMT->IsDefinedInUntrustedCode())
+ ComCallWrapperTemplate *pTemplate = MapIUnknownToWrapper(pDisp)->GetComCallWrapperTemplate();
+ if (pTemplate->IsUseOleAutDispatchImpl())
{
- ComCallWrapperTemplate *pTemplate = MapIUnknownToWrapper(pDisp)->GetComCallWrapperTemplate();
- if (pTemplate->IsUseOleAutDispatchImpl())
- {
- return OleAutDispatchImpl_GetIDsOfNames(pDisp, riid, rgszNames, cNames, lcid, rgdispid);
- }
+ return OleAutDispatchImpl_GetIDsOfNames(pDisp, riid, rgszNames, cNames, lcid, rgdispid);
}
+
return InternalDispatchImpl_GetIDsOfNames(pDisp, riid, rgszNames, cNames, lcid, rgdispid);
}
@@ -1305,14 +1302,10 @@ Dispatch_Invoke
if (pCMT->HasInvisibleParent())
return E_NOTIMPL;
- // Use the right implementation based on the flags in the ComMethodTable.
- if (!pCMT->IsDefinedInUntrustedCode())
+ ComCallWrapperTemplate *pTemplate = MapIUnknownToWrapper(pDisp)->GetComCallWrapperTemplate();
+ if (pTemplate->IsUseOleAutDispatchImpl())
{
- ComCallWrapperTemplate *pTemplate = MapIUnknownToWrapper(pDisp)->GetComCallWrapperTemplate();
- if (pTemplate->IsUseOleAutDispatchImpl())
- {
- return OleAutDispatchImpl_Invoke(pDisp, dispidMember, riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr);
- }
+ return OleAutDispatchImpl_Invoke(pDisp, dispidMember, riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr);
}
return InternalDispatchImpl_Invoke(pDisp, dispidMember, riid, lcid, wFlags, pdispparams, pvarResult, pexcepinfo, puArgErr);
diff --git a/src/vm/stubhelpers.cpp b/src/vm/stubhelpers.cpp
index 837d88f65a..ead312d042 100644
--- a/src/vm/stubhelpers.cpp
+++ b/src/vm/stubhelpers.cpp
@@ -16,7 +16,6 @@
#include "dllimport.h"
#include "fieldmarshaler.h"
#include "comdelegate.h"
-#include "security.h"
#include "eventtrace.h"
#include "comdatetime.h"
#include "gcheaputilities.h"
@@ -1691,7 +1690,7 @@ FCIMPL4(Object*, StubHelpers::GetCOMHRExceptionObject, HRESULT hr, MethodDesc *p
}
}
- GetExceptionForHR(hr, pErrInfo, fForWinRT, &oThrowable, pResErrorInfo, bHasNonCLRLanguageErrorObject);
+ GetExceptionForHR(hr, pErrInfo, !fForWinRT, &oThrowable, pResErrorInfo, bHasNonCLRLanguageErrorObject);
}
HELPER_METHOD_FRAME_END();
diff --git a/src/vm/syncblk.inl b/src/vm/syncblk.inl
index 37d6748525..cb6b280228 100644
--- a/src/vm/syncblk.inl
+++ b/src/vm/syncblk.inl
@@ -178,7 +178,7 @@ FORCEINLINE AwareLock::LeaveHelperAction AwareLock::LeaveHelper(Thread* pCurThre
_ASSERTE((size_t)m_MonitorHeld & 1);
_ASSERTE(m_Recursion >= 1);
-#if defined(_DEBUG) && defined(TRACK_SYNC)
+#if defined(_DEBUG) && defined(TRACK_SYNC) && !defined(CROSSGEN_COMPILE)
// The best place to grab this is from the ECall frame
Frame *pFrame = pCurThread->GetFrame();
int caller = (pFrame && pFrame != FRAME_TOP ? (int) pFrame->GetReturnAddress() : -1);
diff --git a/src/vm/threadpoolrequest.cpp b/src/vm/threadpoolrequest.cpp
index a1ec4b087e..523a0631d6 100644
--- a/src/vm/threadpoolrequest.cpp
+++ b/src/vm/threadpoolrequest.cpp
@@ -21,19 +21,18 @@
#include "object.h"
#include "field.h"
#include "excep.h"
-#include "security.h"
#include "eeconfig.h"
#include "corhost.h"
#include "nativeoverlapped.h"
#include "appdomain.inl"
-BYTE PerAppDomainTPCountList::s_padding[64 - sizeof(LONG)];
+BYTE PerAppDomainTPCountList::s_padding[MAX_CACHE_LINE_SIZE - sizeof(LONG)];
// Make this point to the unmanaged TP in case no appdomains have initialized yet.
// Cacheline aligned, hot variable
-DECLSPEC_ALIGN(64) LONG PerAppDomainTPCountList::s_ADHint = -1;
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) LONG PerAppDomainTPCountList::s_ADHint = -1;
// Move out of the preceding variables' cache line
-DECLSPEC_ALIGN(64) UnManagedPerAppDomainTPCount PerAppDomainTPCountList::s_unmanagedTPCount;
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) UnManagedPerAppDomainTPCount PerAppDomainTPCountList::s_unmanagedTPCount;
//The list of all per-appdomain work-request counts.
ArrayListStatic PerAppDomainTPCountList::s_appDomainIndexList;
diff --git a/src/vm/threadpoolrequest.h b/src/vm/threadpoolrequest.h
index 8d2c7e486a..3d2dc3da82 100644
--- a/src/vm/threadpoolrequest.h
+++ b/src/vm/threadpoolrequest.h
@@ -20,6 +20,8 @@
#ifndef _THREADPOOL_REQUEST_H
#define _THREADPOOL_REQUEST_H
+#include "util.hpp"
+
#define TP_QUANTUM 2
#define UNUSED_THREADPOOL_INDEX (DWORD)-1
@@ -181,11 +183,11 @@ public:
private:
ADID m_id;
TPIndex m_index;
- DECLSPEC_ALIGN(64) struct {
- BYTE m_padding1[64 - sizeof(LONG)];
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) struct {
+ BYTE m_padding1[MAX_CACHE_LINE_SIZE - sizeof(LONG)];
// Only use with VolatileLoad+VolatileStore+FastInterlockCompareExchange
LONG m_numRequestsPending;
- BYTE m_padding2[64];
+ BYTE m_padding2[MAX_CACHE_LINE_SIZE];
};
};
@@ -286,11 +288,11 @@ public:
private:
SpinLock m_lock;
ULONG m_NumRequests;
- DECLSPEC_ALIGN(64) struct {
- BYTE m_padding1[64 - sizeof(LONG)];
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) struct {
+ BYTE m_padding1[MAX_CACHE_LINE_SIZE - sizeof(LONG)];
// Only use with VolatileLoad+VolatileStore+FastInterlockCompareExchange
LONG m_outstandingThreadRequestCount;
- BYTE m_padding2[64];
+ BYTE m_padding2[MAX_CACHE_LINE_SIZE];
};
};
@@ -351,12 +353,12 @@ public:
private:
static DWORD FindFirstFreeTpEntry();
- static BYTE s_padding[64 - sizeof(LONG)];
- DECLSPEC_ALIGN(64) static LONG s_ADHint;
- DECLSPEC_ALIGN(64) static UnManagedPerAppDomainTPCount s_unmanagedTPCount;
+ static BYTE s_padding[MAX_CACHE_LINE_SIZE - sizeof(LONG)];
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static LONG s_ADHint;
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static UnManagedPerAppDomainTPCount s_unmanagedTPCount;
//The list of all per-appdomain work-request counts.
static ArrayListStatic s_appDomainIndexList;
};
-#endif //_THREADPOOL_REQUEST_H \ No newline at end of file
+#endif //_THREADPOOL_REQUEST_H
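
The MAX_CACHE_LINE_SIZE padding above is a false-sharing guard: each hot LONG
gets a cache line to itself. A standalone sketch of the same layout, assuming
a 64-byte line as a stand-in for MAX_CACHE_LINE_SIZE:

    #include <cstddef>

    struct alignas(64) PaddedCounter
    {
        char pad1[64 - sizeof(long)]; // push the counter off the previous line
        long counter;                 // hot field, sole occupant of its line
        char pad2[64];                // keep the next object off this line
    };

    static_assert(sizeof(PaddedCounter) % 64 == 0,
                  "padding must cover whole cache lines");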
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index 59fec2bdc3..91373930e9 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -495,7 +495,6 @@ void Thread::ChooseThreadCPUGroupAffinity()
}
CONTRACTL_END;
-#ifndef FEATURE_PAL
if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
return;
@@ -515,7 +514,6 @@ void Thread::ChooseThreadCPUGroupAffinity()
CPUGroupInfo::SetThreadGroupAffinity(GetThreadHandle(), &groupAffinity, NULL);
m_wCPUGroup = groupAffinity.Group;
m_pAffinityMask = groupAffinity.Mask;
-#endif // !FEATURE_PAL
}
void Thread::ClearThreadCPUGroupAffinity()
@@ -527,7 +525,6 @@ void Thread::ClearThreadCPUGroupAffinity()
}
CONTRACTL_END;
-#ifndef FEATURE_PAL
if (!CPUGroupInfo::CanEnableGCCPUGroups() || !CPUGroupInfo::CanEnableThreadUseAllCpuGroups())
return;
@@ -545,7 +542,6 @@ void Thread::ClearThreadCPUGroupAffinity()
m_wCPUGroup = 0;
m_pAffinityMask = 0;
-#endif // !FEATURE_PAL
}
DWORD Thread::StartThread()
@@ -1367,6 +1363,8 @@ void InitThreadManager()
}
CONTRACTL_END;
+ Thread::s_initializeYieldProcessorNormalizedCrst.Init(CrstLeafLock);
+
// All patched helpers should fit into one page.
// If you hit this assert on a retail build, there is most likely a problem with the BBT script.
_ASSERTE_ALL_BUILDS("clr/src/VM/threads.cpp", (BYTE*)JIT_PatchedCodeLast - (BYTE*)JIT_PatchedCodeStart < (ptrdiff_t)GetOsPageSize());
@@ -1582,7 +1580,7 @@ void Dbg_TrackSyncStack::EnterSync(UINT_PTR caller, void *pAwareLock)
STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::EnterSync, IP=%p, Recursion=%d, MonitorHeld=%d, HoldingThread=%p.\n",
caller,
((AwareLock*)pAwareLock)->m_Recursion,
- ((AwareLock*)pAwareLock)->m_MonitorHeld,
+ ((AwareLock*)pAwareLock)->m_MonitorHeld.LoadWithoutBarrier(),
((AwareLock*)pAwareLock)->m_HoldingThread );
if (m_Active)
@@ -1608,7 +1606,7 @@ void Dbg_TrackSyncStack::LeaveSync(UINT_PTR caller, void *pAwareLock)
STRESS_LOG4(LF_SYNC, LL_INFO100, "Dbg_TrackSyncStack::LeaveSync, IP=%p, Recursion=%d, MonitorHeld=%d, HoldingThread=%p.\n",
caller,
((AwareLock*)pAwareLock)->m_Recursion,
- ((AwareLock*)pAwareLock)->m_MonitorHeld,
+ ((AwareLock*)pAwareLock)->m_MonitorHeld.LoadWithoutBarrier(),
((AwareLock*)pAwareLock)->m_HoldingThread );
if (m_Active)
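m_MonitorHeld is now a Volatile<LONG>-style wrapper, so these logging sites read it via LoadWithoutBarrier(), a plain unordered read that is acceptable for diagnostics. A rough standalone model of such a wrapper, assuming std::atomic as the backing store (CoreCLR's real Volatile<T> in volatile.h differs in detail):

#include <atomic>

// Rough model: Load() is an acquire read that pairs with release stores;
// LoadWithoutBarrier() is a relaxed read for logging and diagnostics where
// staleness is tolerable and no ordering is implied.
template <typename T>
class Volatile
{
    std::atomic<T> m_val{};
public:
    T Load() const               { return m_val.load(std::memory_order_acquire); }
    T LoadWithoutBarrier() const { return m_val.load(std::memory_order_relaxed); }
    void Store(T v)              { m_val.store(v, std::memory_order_release); }
};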
@@ -2017,10 +2015,8 @@ Thread::Thread()
m_fGCSpecial = FALSE;
-#if !defined(FEATURE_PAL)
m_wCPUGroup = 0;
m_pAffinityMask = 0;
-#endif
m_pAllLoggedTypes = NULL;
@@ -2530,7 +2526,7 @@ void UndoRevert(BOOL bReverted, HANDLE hToken)
// We don't want ::CreateThread() calls scattered throughout the source. So gather
// them all here.
-BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args)
+BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName)
{
CONTRACTL {
NOTHROW;
@@ -2557,6 +2553,7 @@ BOOL Thread::CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, voi
bRet = CreateNewOSThread(stackSize, start, args);
#ifndef FEATURE_PAL
UndoRevert(bReverted, token);
+ SetThreadName(m_ThreadHandle, pName);
#endif // !FEATURE_PAL
return bRet;
@@ -11749,3 +11746,87 @@ ULONGLONG Thread::QueryThreadProcessorUsage()
return ullCurrentUsage - ullPreviousUsage;
}
#endif // FEATURE_APPDOMAIN_RESOURCE_MONITORING
+
+CrstStatic Thread::s_initializeYieldProcessorNormalizedCrst;
+int Thread::s_yieldsPerNormalizedYield = 0;
+int Thread::s_optimalMaxNormalizedYieldsPerSpinIteration = 0;
+
+void Thread::InitializeYieldProcessorNormalized()
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CrstHolder lock(&s_initializeYieldProcessorNormalizedCrst);
+
+ if (IsYieldProcessorNormalizedInitialized())
+ {
+ return;
+ }
+
+ // Intel pre-Skylake processor: measured typically 14-17 cycles per yield
+ // Intel post-Skylake processor: measured typically 125-150 cycles per yield
+ const int DefaultYieldsPerNormalizedYield = 1; // defaults are for when no measurement is done
+ const int DefaultOptimalMaxNormalizedYieldsPerSpinIteration = 64; // tuned for pre-Skylake processors; for post-Skylake it should be 7
+ const int MeasureDurationMs = 10;
+ const int MaxYieldsPerNormalizedYield = 10; // measured typically 8-9 on pre-Skylake
+ const int MinNsPerNormalizedYield = 37; // measured typically 37-46 on post-Skylake
+ const int NsPerOptimalMaxSpinIterationDuration = 272; // approx. 900 cycles, measured 281 on pre-Skylake, 263 on post-Skylake
+ const int NsPerSecond = 1000 * 1000 * 1000;
+
+ LARGE_INTEGER li;
+ if (!QueryPerformanceFrequency(&li) || (ULONGLONG)li.QuadPart < 1000 / MeasureDurationMs)
+ {
+ // High-precision clock not available or clock resolution too low; fall back to defaults
+ s_yieldsPerNormalizedYield = DefaultYieldsPerNormalizedYield;
+ s_optimalMaxNormalizedYieldsPerSpinIteration = DefaultOptimalMaxNormalizedYieldsPerSpinIteration;
+ return;
+ }
+ ULONGLONG ticksPerSecond = li.QuadPart;
+
+ // Measure the nanosecond delay per yield
+ ULONGLONG measureDurationTicks = ticksPerSecond / (1000 / MeasureDurationMs);
+ unsigned int yieldCount = 0;
+ QueryPerformanceCounter(&li);
+ ULONGLONG startTicks = li.QuadPart;
+ ULONGLONG elapsedTicks;
+ do
+ {
+ for (int i = 0; i < 10; ++i)
+ {
+ YieldProcessor();
+ }
+ yieldCount += 10;
+
+ QueryPerformanceCounter(&li);
+ ULONGLONG nowTicks = li.QuadPart;
+ elapsedTicks = nowTicks - startTicks;
+ } while (elapsedTicks < measureDurationTicks);
+ double nsPerYield = (double)elapsedTicks * NsPerSecond / ((double)yieldCount * ticksPerSecond);
+ if (nsPerYield < 1)
+ {
+ nsPerYield = 1;
+ }
+
+ // Calculate the number of yields required to span the duration of a normalized yield
+ int yieldsPerNormalizedYield = (int)(MinNsPerNormalizedYield / nsPerYield + 0.5);
+ if (yieldsPerNormalizedYield < 1)
+ {
+ yieldsPerNormalizedYield = 1;
+ }
+ else if (yieldsPerNormalizedYield > MaxYieldsPerNormalizedYield)
+ {
+ yieldsPerNormalizedYield = MaxYieldsPerNormalizedYield;
+ }
+
+ // Calculate the maximum number of yields that would be optimal for a late spin iteration. Typically, we would not want to
+ // spend excessive amounts of time (thousands of cycles) doing only YieldProcessor, as SwitchToThread/Sleep would do a
+ // better job of allowing other work to run.
+ int optimalMaxNormalizedYieldsPerSpinIteration =
+ (int)(NsPerOptimalMaxSpinIterationDuration / (yieldsPerNormalizedYield * nsPerYield) + 0.5);
+ if (optimalMaxNormalizedYieldsPerSpinIteration < 1)
+ {
+ optimalMaxNormalizedYieldsPerSpinIteration = 1;
+ }
+
+ s_yieldsPerNormalizedYield = yieldsPerNormalizedYield;
+ s_optimalMaxNormalizedYieldsPerSpinIteration = optimalMaxNormalizedYieldsPerSpinIteration;
+}
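For reference, the measurement above can be restated compactly in portable C++. This is a sketch, not VM code: std::chrono replaces QueryPerformanceCounter, _mm_pause (x86-only) stands in for YieldProcessor(), and the 10ms/37ns/10 constants are copied from the function above:

#include <chrono>
#include <algorithm>
#include <immintrin.h> // _mm_pause; x86-only stand-in for YieldProcessor()

// Measure ~10ms of pause instructions, then derive how many raw pauses make
// up one "normalized" yield (~37ns), mirroring InitializeYieldProcessorNormalized.
int MeasureYieldsPerNormalizedYield()
{
    using clock = std::chrono::steady_clock;
    const auto measureDuration = std::chrono::milliseconds(10); // MeasureDurationMs
    unsigned yields = 0;
    const auto start = clock::now();
    clock::duration elapsed{};
    do
    {
        for (int i = 0; i < 10; ++i)
            _mm_pause();
        yields += 10;
        elapsed = clock::now() - start;
    } while (elapsed < measureDuration);

    double nsPerYield =
        std::chrono::duration<double, std::nano>(elapsed).count() / yields;
    nsPerYield = std::max(nsPerYield, 1.0);

    // One normalized yield should span ~37ns (MinNsPerNormalizedYield),
    // clamped to at most 10 raw yields (MaxYieldsPerNormalizedYield).
    int perNormalized = (int)(37.0 / nsPerYield + 0.5);
    return std::min(std::max(perNormalized, 1), 10);
}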
diff --git a/src/vm/threads.h b/src/vm/threads.h
index 93e39156c8..4000f216f4 100644
--- a/src/vm/threads.h
+++ b/src/vm/threads.h
@@ -515,6 +515,8 @@ typedef Thread::ForbidSuspendThreadHolder ForbidSuspendThreadHolder;
// Each thread has a stack that tracks all enter and leave requests
struct Dbg_TrackSync
{
+ virtual ~Dbg_TrackSync() = default;
+
virtual void EnterSync (UINT_PTR caller, void *pAwareLock) = 0;
virtual void LeaveSync (UINT_PTR caller, void *pAwareLock) = 0;
};
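The added virtual destructor matters because concrete trackers are destroyed through a Dbg_TrackSync pointer; deleting a derived object through a base pointer without a virtual destructor is undefined behavior. A minimal illustration with hypothetical types:

struct Tracker                      // interface, like Dbg_TrackSync
{
    virtual ~Tracker() = default;   // without this, `delete base` below is UB
    virtual void EnterSync() = 0;
};

struct StackTracker : Tracker       // concrete, like Dbg_TrackSyncStack
{
    void EnterSync() override {}
};

void Destroy(Tracker* base) { delete base; } // safe only with the virtual dtor

int main()
{
    Destroy(new StackTracker());    // StackTracker's destructor runs correctly
}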
@@ -1944,7 +1946,7 @@ public:
// Create all new threads here. The thread is created as suspended, so
// you must ::ResumeThread to kick it off. It is guaranteed to create the
// thread, or throw.
- BOOL CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args);
+ BOOL CreateNewThread(SIZE_T stackSize, LPTHREAD_START_ROUTINE start, void *args, LPCWSTR pName=NULL);
enum StackSizeBucket
@@ -5249,11 +5251,9 @@ public:
// object associated with them (e.g., the bgc thread).
void SetGCSpecial(bool fGCSpecial);
-#ifndef FEATURE_PAL
private:
WORD m_wCPUGroup;
DWORD_PTR m_pAffinityMask;
-#endif // !FEATURE_PAL
public:
void ChooseThreadCPUGroupAffinity();
@@ -5362,6 +5362,71 @@ public:
m_HijackReturnKind = returnKind;
}
#endif // FEATURE_HIJACK
+
+private:
+ static CrstStatic s_initializeYieldProcessorNormalizedCrst;
+ static int s_yieldsPerNormalizedYield;
+ static int s_optimalMaxNormalizedYieldsPerSpinIteration;
+
+private:
+ static void InitializeYieldProcessorNormalized();
+
+public:
+ static bool IsYieldProcessorNormalizedInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return s_yieldsPerNormalizedYield != 0 && s_optimalMaxNormalizedYieldsPerSpinIteration != 0;
+ }
+
+public:
+ static void EnsureYieldProcessorNormalizedInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ if (!IsYieldProcessorNormalizedInitialized())
+ {
+ InitializeYieldProcessorNormalized();
+ }
+ }
+
+public:
+ static int GetOptimalMaxNormalizedYieldsPerSpinIteration()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsYieldProcessorNormalizedInitialized());
+
+ return s_optimalMaxNormalizedYieldsPerSpinIteration;
+ }
+
+public:
+ static void YieldProcessorNormalized()
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsYieldProcessorNormalizedInitialized());
+
+ int n = s_yieldsPerNormalizedYield;
+ while (--n >= 0)
+ {
+ YieldProcessor();
+ }
+ }
+
+ static void YieldProcessorNormalizedWithBackOff(unsigned int spinIteration)
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(IsYieldProcessorNormalizedInitialized());
+
+ int n = s_optimalMaxNormalizedYieldsPerSpinIteration;
+ if (spinIteration <= 30 && (1 << spinIteration) < n)
+ {
+ n = 1 << spinIteration;
+ }
+ n *= s_yieldsPerNormalizedYield;
+ while (--n >= 0)
+ {
+ YieldProcessor();
+ }
+ }
};
// End of class Thread
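Callers are expected to pass a monotonically increasing iteration index so the per-iteration yield count grows 1, 2, 4, ... until it reaches the measured cap. A standalone sketch of that usage shape, with std::this_thread::yield() as a placeholder for YieldProcessor() and a hypothetical cap of 8:

#include <atomic>
#include <thread>

std::atomic<bool> g_ready{false};                  // hypothetical condition another thread sets

const unsigned kOptimalMaxYieldsPerIteration = 8;  // hypothetical measured cap

// Stand-in for Thread::YieldProcessorNormalizedWithBackOff: yields per
// iteration double (1 << spinIteration) until capped at the optimum.
void YieldWithBackOff(unsigned spinIteration)
{
    unsigned n = kOptimalMaxYieldsPerIteration;
    if (spinIteration <= 30 && (1u << spinIteration) < n)
        n = 1u << spinIteration;
    for (unsigned i = 0; i < n; ++i)
        std::this_thread::yield();                 // placeholder for YieldProcessor()
}

// Usage shape: spin with growing cost per iteration until the condition holds.
void SpinUntilReady()
{
    for (unsigned spinIteration = 0; !g_ready.load(std::memory_order_acquire); ++spinIteration)
        YieldWithBackOff(spinIteration);
}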
diff --git a/src/vm/tieredcompilation.cpp b/src/vm/tieredcompilation.cpp
index acc26b90a5..ea69bbfec7 100644
--- a/src/vm/tieredcompilation.cpp
+++ b/src/vm/tieredcompilation.cpp
@@ -12,6 +12,7 @@
#include "excep.h"
#include "log.h"
#include "win32threadpool.h"
+#include "threadsuspend.h"
#include "tieredcompilation.h"
// TieredCompilationManager determines which methods should be recompiled and
@@ -47,14 +48,16 @@
// # Important entrypoints in this code:
//
//
-// a) .ctor and Init(...) - called once during AppDomain initialization
-// b) OnMethodCalled(...) - called when a method is being invoked. When a method
-// has been called enough times this is currently the only
-// trigger that initiates re-compilation.
-// c) OnAppDomainShutdown() - called during AppDomain::Exit() to begin the process
-// of stopping tiered compilation. After this point no more
-// background optimization work will be initiated but in-progress
-// work still needs to complete.
+// a) .ctor and Init(...) - called once during AppDomain initialization
+// b) OnMethodCalled(...) - called when a method is being invoked. When a method
+// has been called enough times, this is currently the only
+// trigger that initiates re-compilation.
+// c) Shutdown() - called during AppDomain::Exit() to begin the process
+// of stopping tiered compilation. After this point no more
+// background optimization work will be initiated but in-progress
+// work still needs to complete.
+// d) ShutdownAllDomains() - Called from EEShutdownHelper to block until all async work is
+// complete. We must do this before we shut down the JIT.
//
// # Overall workflow
//
@@ -107,6 +110,7 @@ void TieredCompilationManager::Init(ADID appDomainId)
SpinLockHolder holder(&m_lock);
m_domainId = appDomainId;
+ m_asyncWorkDoneEvent.CreateManualEventNoThrow(TRUE);
}
// Called each time code in this AppDomain has been run. This is our sole entrypoint to begin
@@ -127,6 +131,43 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
{
return TRUE; // stop notifications for this method
}
+ AsyncPromoteMethodToTier1(pMethodDesc);
+ return TRUE;
+}
+
+void TieredCompilationManager::AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc)
+{
+ STANDARD_VM_CONTRACT;
+
+ NativeCodeVersion t1NativeCodeVersion;
+
+ // Add an inactive native code entry in the versioning table to track the tier1
+ // compilation we are going to create. This entry binds the compilation to a
+ // particular version of the IL code regardless of any changes that may
+ // occur between now and when jitting completes. If the IL does change in that
+ // interval the new code entry won't be activated.
+ {
+ CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMethodDesc);
+ NativeCodeVersionCollection nativeVersions = ilVersion.GetNativeCodeVersions(pMethodDesc);
+ for (NativeCodeVersionIterator cur = nativeVersions.Begin(), end = nativeVersions.End(); cur != end; cur++)
+ {
+ if (cur->GetOptimizationTier() == NativeCodeVersion::OptimizationTier1)
+ {
+ // we've already promoted
+ return;
+ }
+ }
+
+ if (FAILED(ilVersion.AddNativeCodeVersion(pMethodDesc, &t1NativeCodeVersion)))
+ {
+ // optimization didn't work for some reason (presumably OOM)
+ // just give up and continue on
+ return;
+ }
+ t1NativeCodeVersion.SetOptimizationTier(NativeCodeVersion::OptimizationTier1);
+ }
// Insert the method into the optimization queue and trigger a thread to service
// the queue if needed.
@@ -141,7 +182,7 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
// unserviced. Synchronous retries appear unlikely to offer any material improvement
// and complicating the code to narrow an already rare error case isn't desirable.
{
- SListElem<MethodDesc*>* pMethodListItem = new (nothrow) SListElem<MethodDesc*>(pMethodDesc);
+ SListElem<NativeCodeVersion>* pMethodListItem = new (nothrow) SListElem<NativeCodeVersion>(t1NativeCodeVersion);
SpinLockHolder holder(&m_lock);
if (pMethodListItem != NULL)
{
@@ -152,11 +193,11 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
{
// Our current policy throttles at 1 thread, but in the future we
// could experiment with more parallelism.
- m_countOptimizationThreadsRunning++;
+ IncrementWorkerThreadCount();
}
else
{
- return TRUE; // stop notifications for this method
+ return;
}
}
@@ -165,7 +206,7 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
if (!ThreadpoolMgr::QueueUserWorkItem(StaticOptimizeMethodsCallback, this, QUEUE_ONLY, TRUE))
{
SpinLockHolder holder(&m_lock);
- m_countOptimizationThreadsRunning--;
+ DecrementWorkerThreadCount();
STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::OnMethodCalled: "
"ThreadpoolMgr::QueueUserWorkItem returned FALSE (no thread will run), method=%pM\n",
pMethodDesc);
@@ -174,20 +215,45 @@ BOOL TieredCompilationManager::OnMethodCalled(MethodDesc* pMethodDesc, DWORD cur
EX_CATCH
{
SpinLockHolder holder(&m_lock);
- m_countOptimizationThreadsRunning--;
+ DecrementWorkerThreadCount();
STRESS_LOG2(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::OnMethodCalled: "
"Exception queuing work item to threadpool, hr=0x%x, method=%pM\n",
GET_EXCEPTION()->GetHR(), pMethodDesc);
}
EX_END_CATCH(RethrowTerminalExceptions);
- return TRUE; // stop notifications for this method
+ return;
}
-void TieredCompilationManager::OnAppDomainShutdown()
+// static
+// called from EEShutDownHelper
+void TieredCompilationManager::ShutdownAllDomains()
{
- SpinLockHolder holder(&m_lock);
- m_isAppDomainShuttingDown = TRUE;
+ STANDARD_VM_CONTRACT;
+
+ AppDomainIterator domain(TRUE);
+ while (domain.Next())
+ {
+ AppDomain * pDomain = domain.GetDomain();
+ if (pDomain != NULL)
+ {
+ pDomain->GetTieredCompilationManager()->Shutdown(TRUE);
+ }
+ }
+}
+
+void TieredCompilationManager::Shutdown(BOOL fBlockUntilAsyncWorkIsComplete)
+{
+ STANDARD_VM_CONTRACT;
+
+ {
+ SpinLockHolder holder(&m_lock);
+ m_isAppDomainShuttingDown = TRUE;
+ }
+ if (fBlockUntilAsyncWorkIsComplete)
+ {
+ m_asyncWorkDoneEvent.Wait(INFINITE, FALSE);
+ }
}
// This is the initial entrypoint for the background thread, called by
@@ -221,31 +287,33 @@ void TieredCompilationManager::OptimizeMethodsCallback()
SpinLockHolder holder(&m_lock);
if (m_isAppDomainShuttingDown)
{
- m_countOptimizationThreadsRunning--;
+ DecrementWorkerThreadCount();
return;
}
}
ULONGLONG startTickCount = CLRGetTickCount64();
- MethodDesc* pMethod = NULL;
+ NativeCodeVersion nativeCodeVersion;
EX_TRY
{
+ GCX_COOP();
ENTER_DOMAIN_ID(m_domainId);
{
+ GCX_PREEMP();
while (true)
{
{
SpinLockHolder holder(&m_lock);
- pMethod = GetNextMethodToOptimize();
- if (pMethod == NULL ||
+ nativeCodeVersion = GetNextMethodToOptimize();
+ if (nativeCodeVersion.IsNull() ||
m_isAppDomainShuttingDown)
{
- m_countOptimizationThreadsRunning--;
+ DecrementWorkerThreadCount();
break;
}
}
- OptimizeMethod(pMethod);
+ OptimizeMethod(nativeCodeVersion);
// If we have been running for too long return the thread to the threadpool and queue another event
// This gives the threadpool a chance to service other requests on this thread before returning to
@@ -256,7 +324,7 @@ void TieredCompilationManager::OptimizeMethodsCallback()
if (!ThreadpoolMgr::QueueUserWorkItem(StaticOptimizeMethodsCallback, this, QUEUE_ONLY, TRUE))
{
SpinLockHolder holder(&m_lock);
- m_countOptimizationThreadsRunning--;
+ DecrementWorkerThreadCount();
STRESS_LOG0(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::OptimizeMethodsCallback: "
"ThreadpoolMgr::QueueUserWorkItem returned FALSE (no thread will run)\n");
}
@@ -270,51 +338,35 @@ void TieredCompilationManager::OptimizeMethodsCallback()
{
STRESS_LOG2(LF_TIEREDCOMPILATION, LL_ERROR, "TieredCompilationManager::OptimizeMethodsCallback: "
"Unhandled exception during method optimization, hr=0x%x, last method=%pM\n",
- GET_EXCEPTION()->GetHR(), pMethod);
+ GET_EXCEPTION()->GetHR(), nativeCodeVersion.GetMethodDesc());
}
EX_END_CATCH(RethrowTerminalExceptions);
}
// Jit compiles and installs new optimized code for a method.
// Called on a background thread.
-void TieredCompilationManager::OptimizeMethod(MethodDesc* pMethod)
+void TieredCompilationManager::OptimizeMethod(NativeCodeVersion nativeCodeVersion)
{
STANDARD_VM_CONTRACT;
- _ASSERTE(pMethod->IsEligibleForTieredCompilation());
- PCODE pJittedCode = CompileMethod(pMethod);
- if (pJittedCode != NULL)
+ _ASSERTE(nativeCodeVersion.GetMethodDesc()->IsEligibleForTieredCompilation());
+ if (CompileCodeVersion(nativeCodeVersion))
{
- InstallMethodCode(pMethod, pJittedCode);
+ ActivateCodeVersion(nativeCodeVersion);
}
}
// Compiles new optimized code for a method.
// Called on a background thread.
-PCODE TieredCompilationManager::CompileMethod(MethodDesc* pMethod)
+BOOL TieredCompilationManager::CompileCodeVersion(NativeCodeVersion nativeCodeVersion)
{
STANDARD_VM_CONTRACT;
PCODE pCode = NULL;
- ULONG sizeOfCode = 0;
+ MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc();
EX_TRY
{
- CORJIT_FLAGS flags = CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND);
- flags.Add(CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_TIER1));
-
- if (pMethod->IsDynamicMethod())
- {
- ILStubResolver* pResolver = pMethod->AsDynamicMethodDesc()->GetILStubResolver();
- flags.Add(pResolver->GetJitFlags());
- COR_ILMETHOD_DECODER* pILheader = pResolver->GetILHeader();
- pCode = UnsafeJitFunction(pMethod, pILheader, flags, &sizeOfCode);
- }
- else
- {
- COR_ILMETHOD_DECODER::DecoderStatus status;
- COR_ILMETHOD_DECODER header(pMethod->GetILHeader(), pMethod->GetModule()->GetMDImport(), &status);
- pCode = UnsafeJitFunction(pMethod, &header, flags, &sizeOfCode);
- }
+ pCode = pMethod->PrepareCode(nativeCodeVersion);
}
EX_CATCH
{
@@ -324,58 +376,117 @@ PCODE TieredCompilationManager::CompileMethod(MethodDesc* pMethod)
}
EX_END_CATCH(RethrowTerminalExceptions)
- return pCode;
+ return pCode != NULL;
}
// Activates a compiled native code version so that future invocations of the
// method will execute it.
// Called on a background thread.
-void TieredCompilationManager::InstallMethodCode(MethodDesc* pMethod, PCODE pCode)
+void TieredCompilationManager::ActivateCodeVersion(NativeCodeVersion nativeCodeVersion)
{
STANDARD_VM_CONTRACT;
- _ASSERTE(!pMethod->IsNativeCodeStableAfterInit());
+ MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc();
+ CodeVersionManager* pCodeVersionManager = pMethod->GetCodeVersionManager();
- PCODE pExistingCode = pMethod->GetNativeCode();
-#ifdef FEATURE_INTERPRETER
- if (!pMethod->SetNativeCodeInterlocked(pCode, pExistingCode, TRUE))
-#else
- if (!pMethod->SetNativeCodeInterlocked(pCode, pExistingCode))
-#endif
+ // If the ilParent version is active this will activate the native code version now.
+ // Otherwise if the ilParent version becomes active again in the future the native
+ // code version will activate then.
+ ILCodeVersion ilParent;
+ HRESULT hr = S_OK;
{
- //We aren't there yet, but when the feature is finished we shouldn't be racing against any other code mutator and there would be no
- //reason for this to fail
- STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::InstallMethodCode: Method %pM failed to update native code slot. Code=%pK\n",
- pMethod, pCode);
+ // As long as we are exclusively using precode publishing for tiered compilation
+ // methods, this first attempt should succeed
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ ilParent = nativeCodeVersion.GetILCodeVersion();
+ hr = ilParent.SetActiveNativeCodeVersion(nativeCodeVersion, FALSE);
}
- else
+ if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
{
- Precode* pPrecode = pMethod->GetPrecode();
- if (!pPrecode->SetTargetInterlocked(pCode, FALSE))
+ // if we start using jump-stamp publishing for tiered compilation, the first attempt
+ // without the runtime suspended will fail and then this second attempt will
+ // succeed.
+ // Even though this works, performance is likely to be quite bad. Realistically
+ // we are going to need batched updates to make tiered-compilation + jump-stamp
+ // viable. This fallback path is just here as a proof of concept.
+ ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
{
- //We aren't there yet, but when the feature is finished we shouldn't be racing against any other code mutator and there would be no
- //reason for this to fail
- STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::InstallMethodCode: Method %pM failed to update precode. Code=%pK\n",
- pMethod, pCode);
+ CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
+ hr = ilParent.SetActiveNativeCodeVersion(nativeCodeVersion, TRUE);
}
+ ThreadSuspend::RestartEE(FALSE, TRUE);
+ }
+ if (FAILED(hr))
+ {
+ STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::ActivateCodeVersion: Method %pM failed to publish native code for native code version %d\n",
+ pMethod, nativeCodeVersion.GetVersionId());
}
}
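The activation above is optimistic-first: try the cheap publish, and only on CORPROF_E_RUNTIME_SUSPEND_REQUIRED suspend the EE, retry, and restart it. The control-flow shape, modeled with hypothetical stand-ins (none of these are real VM APIs):

#include <cstdio>

// Hypothetical stand-ins modeling the optimistic-then-suspend retry shape
// used by ActivateCodeVersion.
enum Result { Ok, SuspendRequired, Failed };

Result TryPublish(bool runtimeSuspended)
{
    // Pretend the cheap path only works when the runtime is suspended.
    return runtimeSuspended ? Ok : SuspendRequired;
}

void SuspendRuntime() { std::puts("suspend EE"); }
void ResumeRuntime()  { std::puts("restart EE"); }

Result PublishWithFallback()
{
    Result r = TryPublish(false);    // cheap attempt, no suspension
    if (r == SuspendRequired)
    {
        SuspendRuntime();            // stop-the-world fallback
        r = TryPublish(true);        // retry while suspended
        ResumeRuntime();
    }
    return r;
}

int main() { return PublishWithFallback() == Ok ? 0 : 1; }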
// Dequeues the next method in the optimization queue.
// This should be called with m_lock already held and runs
// on the background thread.
-MethodDesc* TieredCompilationManager::GetNextMethodToOptimize()
+NativeCodeVersion TieredCompilationManager::GetNextMethodToOptimize()
{
STANDARD_VM_CONTRACT;
- SListElem<MethodDesc*>* pElem = m_methodsToOptimize.RemoveHead();
+ SListElem<NativeCodeVersion>* pElem = m_methodsToOptimize.RemoveHead();
if (pElem != NULL)
{
- MethodDesc* pMD = pElem->GetValue();
+ NativeCodeVersion nativeCodeVersion = pElem->GetValue();
delete pElem;
- return pMD;
+ return nativeCodeVersion;
+ }
+ return NativeCodeVersion();
+}
+
+void TieredCompilationManager::IncrementWorkerThreadCount()
+{
+ STANDARD_VM_CONTRACT;
+ //m_lock should be held
+
+ m_countOptimizationThreadsRunning++;
+ m_asyncWorkDoneEvent.Reset();
+}
+
+void TieredCompilationManager::DecrementWorkerThreadCount()
+{
+ STANDARD_VM_CONTRACT;
+ //m_lock should be held
+
+ m_countOptimizationThreadsRunning--;
+ if (m_countOptimizationThreadsRunning == 0)
+ {
+ m_asyncWorkDoneEvent.Set();
+ }
+}
+
+//static
+CORJIT_FLAGS TieredCompilationManager::GetJitFlags(NativeCodeVersion nativeCodeVersion)
+{
+ LIMITED_METHOD_CONTRACT;
+
+ CORJIT_FLAGS flags;
+ if (!nativeCodeVersion.GetMethodDesc()->IsEligibleForTieredCompilation())
+ {
+#ifdef FEATURE_INTERPRETER
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
+#endif
+ return flags;
+ }
+
+ if (nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTier0)
+ {
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0);
+ }
+ else
+ {
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1);
+#ifdef FEATURE_INTERPRETER
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
+#endif
}
- return NULL;
+ return flags;
}
#endif // FEATURE_TIERED_COMPILATION
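IncrementWorkerThreadCount/DecrementWorkerThreadCount pair with the manual-reset m_asyncWorkDoneEvent so that Shutdown(TRUE) blocks until the last background worker exits. The same drain pattern in standard C++, with a condition variable standing in for the CLREvent (a sketch, not the VM's implementation):

#include <mutex>
#include <condition_variable>

// Drain pattern modeled on IncrementWorkerThreadCount /
// DecrementWorkerThreadCount / Shutdown: shutdown blocks until the
// worker count returns to zero.
class WorkTracker
{
    std::mutex m_lock;
    std::condition_variable m_allDone; // stands in for m_asyncWorkDoneEvent
    int m_workers = 0;
public:
    void Increment() { std::lock_guard<std::mutex> g(m_lock); ++m_workers; }
    void Decrement()
    {
        std::lock_guard<std::mutex> g(m_lock);
        if (--m_workers == 0)
            m_allDone.notify_all();    // equivalent of m_asyncWorkDoneEvent.Set()
    }
    void WaitUntilIdle()               // equivalent of the Shutdown(TRUE) wait
    {
        std::unique_lock<std::mutex> g(m_lock);
        m_allDone.wait(g, [this] { return m_workers == 0; });
    }
};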
diff --git a/src/vm/tieredcompilation.h b/src/vm/tieredcompilation.h
index 71236c5374..9f6187244a 100644
--- a/src/vm/tieredcompilation.h
+++ b/src/vm/tieredcompilation.h
@@ -26,24 +26,31 @@ public:
void Init(ADID appDomainId);
BOOL OnMethodCalled(MethodDesc* pMethodDesc, DWORD currentCallCount);
- void OnAppDomainShutdown();
+ void AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc);
+ static void ShutdownAllDomains();
+ void Shutdown(BOOL fBlockUntilAsyncWorkIsComplete);
+ static CORJIT_FLAGS GetJitFlags(NativeCodeVersion nativeCodeVersion);
private:
static DWORD StaticOptimizeMethodsCallback(void* args);
void OptimizeMethodsCallback();
- void OptimizeMethod(MethodDesc* pMethod);
- MethodDesc* GetNextMethodToOptimize();
- PCODE CompileMethod(MethodDesc* pMethod);
- void InstallMethodCode(MethodDesc* pMethod, PCODE pCode);
+ void OptimizeMethod(NativeCodeVersion nativeCodeVersion);
+ NativeCodeVersion GetNextMethodToOptimize();
+ BOOL CompileCodeVersion(NativeCodeVersion nativeCodeVersion);
+ void ActivateCodeVersion(NativeCodeVersion nativeCodeVersion);
+
+ void IncrementWorkerThreadCount();
+ void DecrementWorkerThreadCount();
SpinLock m_lock;
- SList<SListElem<MethodDesc*>> m_methodsToOptimize;
+ SList<SListElem<NativeCodeVersion>> m_methodsToOptimize;
ADID m_domainId;
BOOL m_isAppDomainShuttingDown;
DWORD m_countOptimizationThreadsRunning;
DWORD m_callCountOptimizationThreshhold;
DWORD m_optimizationQuantumMs;
+ CLREvent m_asyncWorkDoneEvent;
};
#endif // FEATURE_TIERED_COMPILATION
diff --git a/src/vm/typehandle.cpp b/src/vm/typehandle.cpp
index 32384cc490..9e8afba82f 100644
--- a/src/vm/typehandle.cpp
+++ b/src/vm/typehandle.cpp
@@ -494,7 +494,6 @@ DWORD TypeHandle::IsTransparentProxy() const
return FALSE;
}
-#ifdef FEATURE_HFA
bool TypeHandle::IsHFA() const
{
WRAPPER_NO_CONTRACT;
@@ -520,7 +519,7 @@ CorElementType TypeHandle::GetHFAType() const
return ELEMENT_TYPE_END;
}
-#endif // FEATURE_HFA
+
#ifdef FEATURE_64BIT_ALIGNMENT
bool TypeHandle::RequiresAlign8() const
@@ -1546,6 +1545,14 @@ BOOL TypeHandle::IsByRef() const
}
+BOOL TypeHandle::IsByRefLike() const
+{
+ LIMITED_METHOD_CONTRACT;
+
+ return (!IsTypeDesc() && AsMethodTable()->IsByRefLike());
+}
+
BOOL TypeHandle::IsPointer() const
{
LIMITED_METHOD_CONTRACT;
diff --git a/src/vm/typehandle.h b/src/vm/typehandle.h
index 72a5656cc3..cec772e3be 100644
--- a/src/vm/typehandle.h
+++ b/src/vm/typehandle.h
@@ -394,10 +394,8 @@ public:
CHECK CheckFullyLoaded();
#endif
-#ifdef FEATURE_HFA
bool IsHFA() const;
CorElementType GetHFAType() const;
-#endif // FEATURE_HFA
#ifdef FEATURE_64BIT_ALIGNMENT
bool RequiresAlign8() const;
@@ -513,6 +511,9 @@ public:
// BYREF
BOOL IsByRef() const;
+ // BYREFLIKE (does not return TRUE for IsByRef types)
+ BOOL IsByRefLike() const;
+
// PTR
BOOL IsPointer() const;
diff --git a/src/vm/util.cpp b/src/vm/util.cpp
index 260e0daa38..2c71289c8d 100644
--- a/src/vm/util.cpp
+++ b/src/vm/util.cpp
@@ -1861,8 +1861,6 @@ size_t GetLargestOnDieCacheSize(BOOL bTrueSize)
STATIC_CONTRACT_NOTHROW;
STATIC_CONTRACT_GC_NOTRIGGER;
-#if defined(_TARGET_AMD64_) || defined (_TARGET_X86_)
-
static size_t maxSize;
static size_t maxTrueSize;
@@ -1879,6 +1877,7 @@ size_t GetLargestOnDieCacheSize(BOOL bTrueSize)
}
}
+#if defined(_TARGET_AMD64_) || defined (_TARGET_X86_)
DefaultCatchFilterParam param;
param.pv = COMPLUS_EXCEPTION_EXECUTE_HANDLER;
@@ -2001,18 +2000,20 @@ size_t GetLargestOnDieCacheSize(BOOL bTrueSize)
{
}
PAL_ENDTRY
+#else
+ maxSize = maxTrueSize = GetLogicalProcessorCacheSizeFromOS(); // returns the size of the highest-level processor cache
+#endif
+
+#if defined(_TARGET_ARM64_)
+ // Bigger gen0 size helps arm64 targets
+ maxSize = maxTrueSize * 3;
+#endif
// printf("GetLargestOnDieCacheSize returns %d, adjusted size %d\n", maxSize, maxTrueSize);
if (bTrueSize)
return maxTrueSize;
else
return maxSize;
-
-#else
- size_t cache_size = GetLogicalProcessorCacheSizeFromOS() ; // Returns the size of the highest level processor cache
- return cache_size;
-
-#endif
}
//---------------------------------------------------------------------
@@ -2845,7 +2846,6 @@ void InitializeClrNotifications()
#if defined(FEATURE_GDBJIT)
#include "gdbjit.h"
-__declspec(thread) bool tls_isSymReaderInProgress = false;
#endif // FEATURE_GDBJIT
// called from the runtime
@@ -2859,19 +2859,12 @@ void DACNotify::DoJITNotification(MethodDesc *MethodDescPtr)
MODE_PREEMPTIVE;
}
CONTRACTL_END;
-#if defined(FEATURE_GDBJIT) && defined(FEATURE_PAL) && !defined(CROSSGEN_COMPILE)
- if(!tls_isSymReaderInProgress)
- {
- tls_isSymReaderInProgress = true;
- NotifyGdb::MethodCompiled(MethodDescPtr);
- tls_isSymReaderInProgress = false;
- }
-#endif
+
TADDR Args[2] = { JIT_NOTIFICATION, (TADDR) MethodDescPtr };
DACNotifyExceptionHelper(Args, 2);
}
-void DACNotify::DoJITDiscardNotification(MethodDesc *MethodDescPtr)
+void DACNotify::DoJITPitchingNotification(MethodDesc *MethodDescPtr)
{
CONTRACTL
{
@@ -2883,9 +2876,9 @@ void DACNotify::DoJITDiscardNotification(MethodDesc *MethodDescPtr)
CONTRACTL_END;
#if defined(FEATURE_GDBJIT) && defined(FEATURE_PAL) && !defined(CROSSGEN_COMPILE)
- NotifyGdb::MethodDropped(MethodDescPtr);
+ NotifyGdb::MethodPitched(MethodDescPtr);
#endif
- TADDR Args[2] = { JIT_DISCARD_NOTIFICATION, (TADDR) MethodDescPtr };
+ TADDR Args[2] = { JIT_PITCHING_NOTIFICATION, (TADDR) MethodDescPtr };
DACNotifyExceptionHelper(Args, 2);
}
@@ -3007,10 +3000,10 @@ BOOL DACNotify::ParseJITNotification(TADDR Args[], TADDR& MethodDescPtr)
return TRUE;
}
-BOOL DACNotify::ParseJITDiscardNotification(TADDR Args[], TADDR& MethodDescPtr)
+BOOL DACNotify::ParseJITPitchingNotification(TADDR Args[], TADDR& MethodDescPtr)
{
- _ASSERTE(Args[0] == JIT_DISCARD_NOTIFICATION);
- if (Args[0] != JIT_DISCARD_NOTIFICATION)
+ _ASSERTE(Args[0] == JIT_PITCHING_NOTIFICATION);
+ if (Args[0] != JIT_PITCHING_NOTIFICATION)
{
return FALSE;
}
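The DAC notification protocol here is just a small TADDR array whose first element tags the event and whose second carries the payload; the Parse* helpers validate the tag before extracting. A toy round-trip under that assumption (uintptr_t standing in for TADDR; names are illustrative):

#include <cstdint>
#include <cassert>

using TADDR = std::uintptr_t;                  // stand-in for the VM typedef
constexpr TADDR JIT_PITCHING_NOTIFICATION = 4; // matches the enum in util.hpp

// Encode: Args[0] carries the event tag, Args[1] the MethodDesc address.
void EncodeJitPitching(TADDR args[2], TADDR methodDescPtr)
{
    args[0] = JIT_PITCHING_NOTIFICATION;
    args[1] = methodDescPtr;
}

// Decode: reject anything whose tag does not match, as
// ParseJITPitchingNotification does above.
bool ParseJitPitching(const TADDR args[2], TADDR& methodDescPtr)
{
    if (args[0] != JIT_PITCHING_NOTIFICATION)
        return false;
    methodDescPtr = args[1];
    return true;
}

int main()
{
    TADDR args[2];
    EncodeJitPitching(args, 0x1234);
    TADDR md = 0;
    assert(ParseJitPitching(args, md) && md == 0x1234);
}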
diff --git a/src/vm/util.hpp b/src/vm/util.hpp
index 1f86d6c2d5..edfd9161e4 100644
--- a/src/vm/util.hpp
+++ b/src/vm/util.hpp
@@ -44,6 +44,13 @@
#define UtilMessageBoxNonLocalizedVA __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
#define WszMessageBox __error("Use one of the EEMessageBox APIs (defined in eemessagebox.h) from inside the EE")
+// Hot cache lines need to be aligned to cache line size to improve performance
+#if defined(ARM64)
+#define MAX_CACHE_LINE_SIZE 128
+#else
+#define MAX_CACHE_LINE_SIZE 64
+#endif
+
//========================================================================
// More convenient names for integer types of a guaranteed size.
//========================================================================
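When hand-padding structures against MAX_CACHE_LINE_SIZE, a compile-time check can catch layout regressions. A hedged sketch in standard C++ (the VM itself uses DECLSPEC_ALIGN rather than alignas; HotCounters is illustrative):

#include <cstddef>

#if defined(ARM64)
#define MAX_CACHE_LINE_SIZE 128
#else
#define MAX_CACHE_LINE_SIZE 64
#endif

// Illustrative pair of hot fields padded onto separate cache lines.
struct alignas(MAX_CACHE_LINE_SIZE) HotCounters
{
    long a;
    char pad[MAX_CACHE_LINE_SIZE - sizeof(long)];
    long b; // must land on the next cache line
};

// Fails to compile if someone shrinks the padding or reorders the fields.
static_assert(offsetof(HotCounters, b) == MAX_CACHE_LINE_SIZE,
              "a and b no longer sit on distinct cache lines");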
@@ -1064,7 +1071,7 @@ public:
MODULE_LOAD_NOTIFICATION=1,
MODULE_UNLOAD_NOTIFICATION=2,
JIT_NOTIFICATION=3,
- JIT_DISCARD_NOTIFICATION=4,
+ JIT_PITCHING_NOTIFICATION=4,
EXCEPTION_NOTIFICATION=5,
GC_NOTIFICATION= 6,
CATCH_ENTER_NOTIFICATION = 7,
@@ -1072,7 +1079,7 @@ public:
// called from the runtime
static void DoJITNotification(MethodDesc *MethodDescPtr);
- static void DoJITDiscardNotification(MethodDesc *MethodDescPtr);
+ static void DoJITPitchingNotification(MethodDesc *MethodDescPtr);
static void DoModuleLoadNotification(Module *Module);
static void DoModuleUnloadNotification(Module *Module);
static void DoExceptionNotification(class Thread* ThreadPtr);
@@ -1082,7 +1089,7 @@ public:
// called from the DAC
static int GetType(TADDR Args[]);
static BOOL ParseJITNotification(TADDR Args[], TADDR& MethodDescPtr);
- static BOOL ParseJITDiscardNotification(TADDR Args[], TADDR& MethodDescPtr);
+ static BOOL ParseJITPitchingNotification(TADDR Args[], TADDR& MethodDescPtr);
static BOOL ParseModuleLoadNotification(TADDR Args[], TADDR& ModulePtr);
static BOOL ParseModuleUnloadNotification(TADDR Args[], TADDR& ModulePtr);
static BOOL ParseExceptionNotification(TADDR Args[], TADDR& ThreadPtr);
diff --git a/src/vm/vars.cpp b/src/vm/vars.cpp
index 3a8046b26b..ff941d2101 100644
--- a/src/vm/vars.cpp
+++ b/src/vm/vars.cpp
@@ -99,7 +99,6 @@ GPTR_IMPL(MethodTable, g_pICastableInterface);
GPTR_IMPL(MethodDesc, g_pExecuteBackoutCodeHelperMethod);
-GPTR_IMPL(MethodDesc, g_pObjectCtorMD);
GPTR_IMPL(MethodDesc, g_pObjectFinalizerMD);
GPTR_IMPL(Thread,g_pFinalizerThread);
diff --git a/src/vm/vars.hpp b/src/vm/vars.hpp
index cc167f2809..c9f4848692 100644
--- a/src/vm/vars.hpp
+++ b/src/vm/vars.hpp
@@ -404,7 +404,6 @@ GPTR_DECL(MethodTable, g_pICastableInterface);
GPTR_DECL(MethodDesc, g_pExecuteBackoutCodeHelperMethod);
-GPTR_DECL(MethodDesc, g_pObjectCtorMD);
GPTR_DECL(MethodDesc, g_pObjectFinalizerMD);
//<TODO> @TODO Remove eventually - determines whether the verifier throws an exception when something fails</TODO>
diff --git a/src/vm/virtualcallstub.cpp b/src/vm/virtualcallstub.cpp
index c230f254c6..2e94a16666 100644
--- a/src/vm/virtualcallstub.cpp
+++ b/src/vm/virtualcallstub.cpp
@@ -20,6 +20,10 @@
#include "compile.h"
#endif
+#ifdef FEATURE_PERFMAP
+#include "perfmap.h"
+#endif
+
#ifndef DACCESS_COMPILE
//@TODO: make these conditional on whether logs are being produced
@@ -2694,6 +2698,10 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStub(PCODE ad
LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
+#ifdef FEATURE_PERFMAP
+ PerfMap::LogStubs(__FUNCTION__, "GenerateDispatchStub", (PCODE)holder->stub(), holder->stub()->size());
+#endif
+
RETURN (holder);
}
@@ -2736,6 +2744,10 @@ DispatchHolder *VirtualCallStubManager::GenerateDispatchStubLong(PCODE
LOG((LF_STUBS, LL_INFO10000, "GenerateDispatchStub for token" FMT_ADDR "and pMT" FMT_ADDR "at" FMT_ADDR "\n",
DBG_ADDR(dispatchToken), DBG_ADDR(pMTExpected), DBG_ADDR(holder->stub())));
+#ifdef FEATURE_PERFMAP
+ PerfMap::LogStubs(__FUNCTION__, "GenerateDispatchStub", (PCODE)holder->stub(), holder->stub()->size());
+#endif
+
RETURN (holder);
}
#endif
@@ -2822,6 +2834,10 @@ ResolveHolder *VirtualCallStubManager::GenerateResolveStub(PCODE addr
LOG((LF_STUBS, LL_INFO10000, "GenerateResolveStub for token" FMT_ADDR "at" FMT_ADDR "\n",
DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
+#ifdef FEATURE_PERFMAP
+ PerfMap::LogStubs(__FUNCTION__, "GenerateResolveStub", (PCODE)holder->stub(), holder->stub()->size());
+#endif
+
RETURN (holder);
}
@@ -2852,6 +2868,10 @@ LookupHolder *VirtualCallStubManager::GenerateLookupStub(PCODE addrOfResolver, s
LOG((LF_STUBS, LL_INFO10000, "GenerateLookupStub for token" FMT_ADDR "at" FMT_ADDR "\n",
DBG_ADDR(dispatchToken), DBG_ADDR(holder->stub())));
+#ifdef FEATURE_PERFMAP
+ PerfMap::LogStubs(__FUNCTION__, "GenerateLookupStub", (PCODE)holder->stub(), holder->stub()->size());
+#endif
+
RETURN (holder);
}
diff --git a/src/vm/win32threadpool.cpp b/src/vm/win32threadpool.cpp
index 18df0dc76e..eabbcb93ae 100644
--- a/src/vm/win32threadpool.cpp
+++ b/src/vm/win32threadpool.cpp
@@ -86,7 +86,7 @@ SVAL_IMPL(LONG,ThreadpoolMgr,MaxFreeCPThreads); // = MaxFreeCP
Volatile<LONG> ThreadpoolMgr::NumCPInfrastructureThreads = 0; // number of threads currently busy handling draining cycle
// Cacheline aligned, hot variable
-DECLSPEC_ALIGN(64) SVAL_IMPL(ThreadpoolMgr::ThreadCounter, ThreadpoolMgr, WorkerCounter);
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) SVAL_IMPL(ThreadpoolMgr::ThreadCounter, ThreadpoolMgr, WorkerCounter);
SVAL_IMPL(LONG,ThreadpoolMgr,MinLimitTotalWorkerThreads); // = MaxLimitCPThreadsPerCPU * number of CPUS
SVAL_IMPL(LONG,ThreadpoolMgr,MaxLimitTotalWorkerThreads); // = MaxLimitCPThreadsPerCPU * number of CPUS
@@ -97,7 +97,7 @@ LONG ThreadpoolMgr::cpuUtilizationAverage = 0;
HillClimbing ThreadpoolMgr::HillClimbingInstance;
// Cacheline aligned, 3 hot variables updated in a group
-DECLSPEC_ALIGN(64) LONG ThreadpoolMgr::PriorCompletedWorkRequests = 0;
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) LONG ThreadpoolMgr::PriorCompletedWorkRequests = 0;
DWORD ThreadpoolMgr::PriorCompletedWorkRequestsTime;
DWORD ThreadpoolMgr::NextCompletedWorkRequestsTime;
@@ -116,10 +116,10 @@ int ThreadpoolMgr::ThreadAdjustmentInterval;
LONG ThreadpoolMgr::Initialization=0; // indicator of whether the threadpool is initialized.
// Cacheline aligned, hot variable
-DECLSPEC_ALIGN(64) unsigned int ThreadpoolMgr::LastDequeueTime; // used to determine if work items are getting thread starved
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) unsigned int ThreadpoolMgr::LastDequeueTime; // used to determine if work items are getting thread starved
// Move out of the preceding variables' cache line
-DECLSPEC_ALIGN(64) int ThreadpoolMgr::offset_counter = 0;
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) int ThreadpoolMgr::offset_counter = 0;
SPTR_IMPL(WorkRequest,ThreadpoolMgr,WorkRequestHead); // Head of work request queue
SPTR_IMPL(WorkRequest,ThreadpoolMgr,WorkRequestTail); // Head of work request queue
@@ -144,17 +144,17 @@ HANDLE ThreadpoolMgr::TimerThread=NULL;
Thread *ThreadpoolMgr::pTimerThread=NULL;
// Cacheline aligned, hot variable
-DECLSPEC_ALIGN(64) DWORD ThreadpoolMgr::LastTickCount;
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) DWORD ThreadpoolMgr::LastTickCount;
#ifdef _DEBUG
DWORD ThreadpoolMgr::TickCountAdjustment=0;
#endif
// Cacheline aligned, hot variable
-DECLSPEC_ALIGN(64) LONG ThreadpoolMgr::GateThreadStatus=GATE_THREAD_STATUS_NOT_RUNNING;
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) LONG ThreadpoolMgr::GateThreadStatus=GATE_THREAD_STATUS_NOT_RUNNING;
// Move out of the preceding variables' cache line
-DECLSPEC_ALIGN(64) ThreadpoolMgr::RecycledListsWrapper ThreadpoolMgr::RecycledLists;
+DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) ThreadpoolMgr::RecycledListsWrapper ThreadpoolMgr::RecycledLists;
ThreadpoolMgr::TimerInfo *ThreadpoolMgr::TimerInfosToBeRecycled = NULL;
@@ -1815,8 +1815,8 @@ Thread* ThreadpoolMgr::CreateUnimpersonatedThread(LPTHREAD_START_ROUTINE lpStart
// CreateNewThread takes care of reverting any impersonation - so dont do anything here.
bOK = pThread->CreateNewThread(0, // default stack size
lpStartAddress,
- lpArgs //arguments
- );
+ lpArgs, //arguments
+ W(".NET Core ThreadPool"));
}
else {
#ifndef FEATURE_PAL
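CreateNewThread now forwards an optional name to SetThreadName, which is why pool threads show up as ".NET Core ThreadPool" in debuggers. On Windows 10 1607+ the underlying primitive is the Win32 SetThreadDescription API; a minimal direct use (error handling elided; this is a sketch, not the VM's SetThreadName):

#include <windows.h> // CreateThread, SetThreadDescription (Windows 10 1607+)

DWORD WINAPI Worker(LPVOID) { return 0; }

int main()
{
    HANDLE h = CreateThread(nullptr, 0, Worker, nullptr, CREATE_SUSPENDED, nullptr);
    // The name shows up in debuggers and traces, like the pool's
    // ".NET Core ThreadPool" name set via Thread::CreateNewThread above.
    SetThreadDescription(h, L"example worker");
    ResumeThread(h);
    WaitForSingleObject(h, INFINITE);
    CloseHandle(h);
}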
diff --git a/src/vm/win32threadpool.h b/src/vm/win32threadpool.h
index 65be889a50..fc5742b494 100644
--- a/src/vm/win32threadpool.h
+++ b/src/vm/win32threadpool.h
@@ -123,9 +123,8 @@ class ThreadpoolMgr
class UnfairSemaphore
{
private:
-
// padding to ensure we get our own cache line
- BYTE padding1[64];
+ BYTE padding1[MAX_CACHE_LINE_SIZE];
//
// We track everything we care about in a single 64-bit struct to allow us to
@@ -146,12 +145,12 @@ class ThreadpoolMgr
} m_counts;
private:
+ // padding to ensure we get our own cache line
+ BYTE padding2[MAX_CACHE_LINE_SIZE];
+
const int m_spinLimitPerProcessor; //used when calculating max spin duration
CLRSemaphore m_sem; //waiters wait on this
- // padding to ensure we get our own cache line
- BYTE padding2[64];
-
INDEBUG(int m_maxCount;)
bool UpdateCounts(Counts newCounts, Counts currentCounts)
@@ -350,6 +349,9 @@ public:
{
static const int MaxPossibleCount = 0x7fff;
+ // padding to ensure we get our own cache line
+ BYTE padding1[MAX_CACHE_LINE_SIZE];
+
union Counts
{
struct
@@ -370,11 +372,10 @@ public:
LONGLONG AsLongLong;
bool operator==(Counts other) {LIMITED_METHOD_CONTRACT; return AsLongLong == other.AsLongLong;}
-
} counts;
// padding to ensure we get our own cache line
- BYTE padding[64];
+ BYTE padding2[MAX_CACHE_LINE_SIZE];
Counts GetCleanCounts()
{
@@ -962,11 +963,11 @@ public:
//
class RecycledListsWrapper
{
- DWORD CacheGuardPre[64/sizeof(DWORD)];
+ DWORD CacheGuardPre[MAX_CACHE_LINE_SIZE/sizeof(DWORD)];
RecycledListInfo (*pRecycledListPerProcessor)[MEMTYPE_COUNT]; // RecycledListInfo [numProc][MEMTYPE_COUNT]
- DWORD CacheGuardPost[64/sizeof(DWORD)];
+ DWORD CacheGuardPost[MAX_CACHE_LINE_SIZE/sizeof(DWORD)];
public:
void Initialize( unsigned int numProcs );
@@ -1247,11 +1248,11 @@ private:
SVAL_DECL(LONG,MinLimitTotalWorkerThreads); // same as MinLimitTotalCPThreads
SVAL_DECL(LONG,MaxLimitTotalWorkerThreads); // same as MaxLimitTotalCPThreads
- DECLSPEC_ALIGN(64) static unsigned int LastDequeueTime; // used to determine if work items are getting thread starved
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static unsigned int LastDequeueTime; // used to determine if work items are getting thread starved
static HillClimbing HillClimbingInstance;
- DECLSPEC_ALIGN(64) static LONG PriorCompletedWorkRequests;
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static LONG PriorCompletedWorkRequests;
static DWORD PriorCompletedWorkRequestsTime;
static DWORD NextCompletedWorkRequestsTime;
@@ -1277,7 +1278,7 @@ private:
static const DWORD WorkerTimeout = 20 * 1000;
static const DWORD WorkerTimeoutAppX = 5 * 1000; // shorter timeout to allow threads to exit prior to app suspension
- DECLSPEC_ALIGN(64) SVAL_DECL(ThreadCounter,WorkerCounter);
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) SVAL_DECL(ThreadCounter,WorkerCounter);
//
// WorkerSemaphore is an UnfairSemaphore because:
@@ -1306,7 +1307,7 @@ private:
SVAL_DECL(LIST_ENTRY,TimerQueue); // queue of timers
static HANDLE TimerThread; // Currently we only have one timer thread
static Thread* pTimerThread;
- DECLSPEC_ALIGN(64) static DWORD LastTickCount; // the count just before timer thread goes to sleep
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static DWORD LastTickCount; // the count just before timer thread goes to sleep
static BOOL InitCompletionPortThreadpool; // flag indicating whether completion port threadpool has been initialized
static HANDLE GlobalCompletionPort; // used for binding io completions on file handles
@@ -1319,20 +1320,20 @@ private:
SVAL_DECL(LONG,MinLimitTotalCPThreads);
SVAL_DECL(LONG,MaxFreeCPThreads); // = MaxFreeCPThreadsPerCPU * Number of CPUS
- DECLSPEC_ALIGN(64) static LONG GateThreadStatus; // See GateThreadStatus enumeration
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static LONG GateThreadStatus; // See GateThreadStatus enumeration
static Volatile<LONG> NumCPInfrastructureThreads; // number of threads currently busy handling draining cycle
SVAL_DECL(LONG,cpuUtilization);
static LONG cpuUtilizationAverage;
- DECLSPEC_ALIGN(64) static RecycledListsWrapper RecycledLists;
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static RecycledListsWrapper RecycledLists;
#ifdef _DEBUG
static DWORD TickCountAdjustment; // add this value to value returned by GetTickCount
#endif
- DECLSPEC_ALIGN(64) static int offset_counter;
+ DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) static int offset_counter;
static const int offset_multiplier = 128;
};