Diffstat (limited to 'src/vm')
-rw-r--r-- src/vm/CMakeLists.txt | 25
-rw-r--r-- src/vm/amd64/CrtHelpers.asm | 734
-rw-r--r-- src/vm/amd64/asmconstants.h | 4
-rw-r--r-- src/vm/amd64/cgencpu.h | 6
-rw-r--r-- src/vm/amd64/excepamd64.cpp | 2
-rw-r--r-- src/vm/amd64/jitinterfaceamd64.cpp | 2
-rw-r--r-- src/vm/amd64/umthunkstub.S | 2
-rw-r--r-- src/vm/appdomain.cpp | 53
-rw-r--r-- src/vm/appdomain.hpp | 14
-rw-r--r-- src/vm/arm/asmconstants.h | 4
-rw-r--r-- src/vm/arm/cgencpu.h | 6
-rw-r--r-- src/vm/arm/stubs.cpp | 2
-rw-r--r-- src/vm/assemblynative.cpp | 35
-rw-r--r-- src/vm/assemblynative.hpp | 1
-rw-r--r-- src/vm/assemblyspec.hpp | 14
-rw-r--r-- src/vm/callhelpers.cpp | 88
-rw-r--r-- src/vm/callingconvention.h | 4
-rw-r--r-- src/vm/ceeload.cpp | 30
-rw-r--r-- src/vm/ceeload.h | 22
-rw-r--r-- src/vm/ceemain.cpp | 37
-rw-r--r-- src/vm/cgensys.h | 11
-rw-r--r-- src/vm/class.cpp | 4
-rw-r--r-- src/vm/class.h | 5
-rw-r--r-- src/vm/class.inl | 2
-rw-r--r-- src/vm/classcompat.cpp | 2
-rw-r--r-- src/vm/classnames.h | 3
-rw-r--r-- src/vm/clrprivbinderwinrt.cpp | 11
-rw-r--r-- src/vm/clsload.hpp | 2
-rw-r--r-- src/vm/codeman.cpp | 158
-rw-r--r-- src/vm/codeman.h | 46
-rw-r--r-- src/vm/comdelegate.cpp | 128
-rw-r--r-- src/vm/comdependenthandle.cpp | 21
-rw-r--r-- src/vm/comdependenthandle.h | 2
-rw-r--r-- src/vm/commemoryfailpoint.cpp | 2
-rw-r--r-- src/vm/commethodrental.cpp | 4
-rw-r--r-- src/vm/commodule.cpp | 3
-rw-r--r-- src/vm/common.h | 2
-rw-r--r-- src/vm/compile.cpp | 64
-rw-r--r-- src/vm/compile.h | 2
-rw-r--r-- src/vm/comsynchronizable.cpp | 6
-rw-r--r-- src/vm/comthreadpool.cpp | 17
-rw-r--r-- src/vm/comutilnative.cpp | 62
-rw-r--r-- src/vm/constrainedexecutionregion.cpp | 5
-rw-r--r-- src/vm/constrainedexecutionregion.h | 3
-rw-r--r-- src/vm/corhost.cpp | 10
-rw-r--r-- src/vm/crossgen/CMakeLists.txt | 7
-rw-r--r-- src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj | 7
-rw-r--r-- src/vm/crossgencompile.cpp | 4
-rw-r--r-- src/vm/crst.cpp | 4
-rw-r--r-- src/vm/customattribute.cpp | 84
-rw-r--r-- src/vm/dac/dacwks.targets | 1
-rw-r--r-- src/vm/dataimage.cpp | 2
-rw-r--r-- src/vm/debugdebugger.cpp | 42
-rw-r--r-- src/vm/debughelp.cpp | 21
-rw-r--r-- src/vm/dllimport.cpp | 22
-rw-r--r-- src/vm/dllimportcallback.cpp | 26
-rw-r--r-- src/vm/dllimportcallback.h | 16
-rw-r--r-- src/vm/domainfile.cpp | 4
-rw-r--r-- src/vm/dwreport.cpp | 2
-rw-r--r-- src/vm/ecalllist.h | 113
-rw-r--r-- src/vm/eedbginterfaceimpl.cpp | 4
-rw-r--r-- src/vm/eepolicy.cpp | 2
-rw-r--r-- src/vm/eetoprofinterfaceimpl.cpp | 164
-rw-r--r-- src/vm/eetoprofinterfaceimpl.h | 15
-rw-r--r-- src/vm/eetoprofinterfaceimpl.inl | 6
-rw-r--r-- src/vm/eetwain.cpp | 209
-rw-r--r-- src/vm/encee.cpp | 15
-rw-r--r-- src/vm/eventtrace.cpp | 37
-rw-r--r-- src/vm/eventtracepriv.h | 5
-rw-r--r-- src/vm/excep.cpp | 95
-rw-r--r-- src/vm/excep.h | 7
-rw-r--r-- src/vm/exceptionhandling.cpp | 2
-rw-r--r-- src/vm/exceptmacros.h | 2
-rw-r--r-- src/vm/exinfo.cpp | 8
-rw-r--r-- src/vm/exinfo.h | 2
-rw-r--r-- src/vm/finalizerthread.cpp | 28
-rw-r--r-- src/vm/frames.cpp | 2
-rw-r--r-- src/vm/frames.h | 8
-rw-r--r-- src/vm/frameworkexceptionloader.cpp | 2
-rw-r--r-- src/vm/gccover.cpp | 36
-rw-r--r-- src/vm/gcenv.ee.cpp | 437
-rw-r--r-- src/vm/gcenv.ee.h | 46
-rw-r--r-- src/vm/gcenv.h | 2
-rw-r--r-- src/vm/gcenv.os.cpp | 67
-rw-r--r-- src/vm/gcheaputilities.cpp | 19
-rw-r--r-- src/vm/gcheaputilities.h | 129
-rw-r--r-- src/vm/gchelpers.cpp | 89
-rw-r--r-- src/vm/gchost.cpp | 12
-rw-r--r-- src/vm/gcinfodecoder.cpp | 197
-rw-r--r-- src/vm/gcinterface.h (renamed from src/vm/gc.h) | 2
-rw-r--r-- src/vm/gcstress.h | 12
-rw-r--r-- src/vm/gdbjit.cpp | 2054
-rw-r--r-- src/vm/gdbjit.h | 386
-rw-r--r-- src/vm/gdbjithelpers.h | 2
-rw-r--r-- src/vm/hash.cpp | 4
-rw-r--r-- src/vm/i386/RedirectedHandledJITCase.asm | 2
-rw-r--r-- src/vm/i386/asmconstants.h | 18
-rw-r--r-- src/vm/i386/asmhelpers.S | 1140
-rw-r--r-- src/vm/i386/asmhelpers.asm | 28
-rw-r--r-- src/vm/i386/cgencpu.h | 9
-rw-r--r-- src/vm/i386/cgenx86.cpp | 79
-rw-r--r-- src/vm/i386/excepcpu.h | 31
-rw-r--r-- src/vm/i386/excepx86.cpp | 86
-rw-r--r-- src/vm/i386/gmsasm.S | 28
-rw-r--r-- src/vm/i386/gmsx86.cpp | 136
-rw-r--r-- src/vm/i386/jithelp.S | 749
-rw-r--r-- src/vm/i386/jithelp.asm | 30
-rw-r--r-- src/vm/i386/jitinterfacex86.cpp | 18
-rw-r--r-- src/vm/i386/stublinkerx86.cpp | 27
-rw-r--r-- src/vm/i386/stublinkerx86.h | 19
-rw-r--r-- src/vm/i386/umthunkstub.S | 177
-rw-r--r-- src/vm/i386/unixstubs.cpp | 106
-rw-r--r-- src/vm/i386/virtualcallstubcpu.hpp | 2
-rw-r--r-- src/vm/ilmarshalers.cpp | 2
-rw-r--r-- src/vm/ilstubcache.cpp | 2
-rw-r--r-- src/vm/ilstubresolver.cpp | 10
-rw-r--r-- src/vm/ilstubresolver.h | 6
-rw-r--r-- src/vm/interoputil.cpp | 2
-rw-r--r-- src/vm/interpreter.cpp | 27
-rw-r--r-- src/vm/interpreter.h | 2
-rw-r--r-- src/vm/jithelpers.cpp | 32
-rw-r--r-- src/vm/jitinterface.cpp | 456
-rw-r--r-- src/vm/jitinterface.h | 24
-rw-r--r-- src/vm/jitinterfacegen.cpp | 2
-rw-r--r-- src/vm/marshalnative.cpp | 2
-rw-r--r-- src/vm/marshalnative.h | 2
-rw-r--r-- src/vm/mdaassistants.cpp | 4
-rw-r--r-- src/vm/memberload.cpp | 2
-rw-r--r-- src/vm/message.cpp | 2
-rw-r--r-- src/vm/metasig.h | 3
-rw-r--r-- src/vm/method.cpp | 4
-rw-r--r-- src/vm/method.hpp | 18
-rw-r--r-- src/vm/methodtable.cpp | 8
-rw-r--r-- src/vm/methodtablebuilder.cpp | 47
-rw-r--r-- src/vm/methodtablebuilder.h | 1
-rw-r--r-- src/vm/microsoft.comservices_i.c | 16
-rw-r--r-- src/vm/mlinfo.cpp | 4
-rw-r--r-- src/vm/mscorlib.h | 36
-rw-r--r-- src/vm/multicorejitplayer.cpp | 2
-rw-r--r-- src/vm/nativeoverlapped.cpp | 24
-rw-r--r-- src/vm/nativeoverlapped.h | 3
-rw-r--r-- src/vm/object.cpp | 32
-rw-r--r-- src/vm/object.h | 25
-rw-r--r-- src/vm/prestub.cpp | 66
-rw-r--r-- src/vm/profattach.cpp | 2
-rw-r--r-- src/vm/profilinghelper.cpp | 2
-rw-r--r-- src/vm/proftoeeinterfaceimpl.cpp | 368
-rw-r--r-- src/vm/proftoeeinterfaceimpl.h | 24
-rw-r--r-- src/vm/rcwwalker.cpp | 4
-rw-r--r-- src/vm/readytoruninfo.cpp | 82
-rw-r--r-- src/vm/reflectioninvocation.cpp | 8
-rw-r--r-- src/vm/reflectioninvocation.h | 6
-rw-r--r-- src/vm/rejit.cpp | 11
-rw-r--r-- src/vm/rexcep.h | 4
-rw-r--r-- src/vm/runtimecallablewrapper.cpp | 7
-rw-r--r-- src/vm/safehandle.cpp | 4
-rw-r--r-- src/vm/securityattributes.cpp | 7
-rw-r--r-- src/vm/securitydeclarativecache.cpp | 9
-rw-r--r-- src/vm/securityprincipal.h | 4
-rw-r--r-- src/vm/siginfo.cpp | 2
-rw-r--r-- src/vm/stacksampler.cpp | 10
-rw-r--r-- src/vm/stacksampler.h | 4
-rw-r--r-- src/vm/stackwalk.cpp | 8
-rw-r--r-- src/vm/stackwalk.h | 4
-rw-r--r-- src/vm/stubcache.h | 2
-rw-r--r-- src/vm/stubgen.cpp | 39
-rw-r--r-- src/vm/stubgen.h | 2
-rw-r--r-- src/vm/stubhelpers.cpp | 12
-rw-r--r-- src/vm/syncblk.cpp | 16
-rw-r--r-- src/vm/syncclean.cpp | 2
-rw-r--r-- src/vm/testhookmgr.cpp | 2
-rw-r--r-- src/vm/threadpoolrequest.cpp | 4
-rw-r--r-- src/vm/threads.cpp | 133
-rw-r--r-- src/vm/threads.h | 16
-rw-r--r-- src/vm/threadsuspend.cpp | 70
-rw-r--r-- src/vm/typeparse.cpp | 6
-rw-r--r-- src/vm/typeparse.h | 7
-rw-r--r-- src/vm/util.cpp | 40
-rw-r--r-- src/vm/vars.cpp | 9
-rw-r--r-- src/vm/vars.hpp | 11
-rw-r--r-- src/vm/virtualcallstub.cpp | 2
-rw-r--r-- src/vm/vm.settings | 7
-rw-r--r-- src/vm/win32threadpool.cpp | 22
-rw-r--r-- src/vm/win32threadpool.h | 8
-rw-r--r-- src/vm/winrtredirector.h | 2
-rw-r--r-- src/vm/wks/wks.targets | 1
186 files changed, 8320 insertions, 2336 deletions
diff --git a/src/vm/CMakeLists.txt b/src/vm/CMakeLists.txt
index 6f17a90c1f..9fdddd6c26 100644
--- a/src/vm/CMakeLists.txt
+++ b/src/vm/CMakeLists.txt
@@ -70,6 +70,7 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
formattype.cpp
fptrstubs.cpp
frames.cpp
+ gcheaputilities.cpp
genericdict.cpp
generics.cpp
hash.cpp
@@ -116,7 +117,6 @@ set(VM_SOURCES_DAC_AND_WKS_COMMON
virtualcallstub.cpp
win32threadpool.cpp
zapsig.cpp
- ${VM_SOURCES_GDBJIT}
)
if(FEATURE_READYTORUN)
@@ -155,7 +155,6 @@ set(VM_SOURCES_WKS
comthreadpool.cpp
comutilnative.cpp
comwaithandle.cpp
- constrainedexecutionregion.cpp
coverage.cpp
customattribute.cpp
custommarshalerinfo.cpp
@@ -172,7 +171,6 @@ set(VM_SOURCES_WKS
frameworkexceptionloader.cpp
gccover.cpp
gcenv.ee.cpp
- gcenv.os.cpp
gchelpers.cpp
genmeth.cpp
hosting.cpp
@@ -240,14 +238,27 @@ set(VM_SOURCES_WKS
typeparse.cpp
verifier.cpp
weakreferencenative.cpp
+ ${VM_SOURCES_GDBJIT}
)
+if(FEATURE_CER)
+ list(APPEND VM_SOURCES_WKS
+ constrainedexecutionregion.cpp
+ )
+endif(FEATURE_CER)
+
if(FEATURE_EVENT_TRACE)
list(APPEND VM_SOURCES_WKS
eventtrace.cpp
)
endif(FEATURE_EVENT_TRACE)
+if(NOT FEATURE_STANDALONE_GC)
+ list(APPEND VM_SOURCES_WKS
+ gcenv.os.cpp
+ )
+endif(NOT FEATURE_STANDALONE_GC)
+
if(WIN32)
set(VM_SOURCES_DAC_AND_WKS_WIN32
@@ -364,6 +375,13 @@ else(WIN32)
${ARCH_SOURCES_DIR}/umthunkstub.S
${ARCH_SOURCES_DIR}/virtualcallstubamd64.S
)
+ elseif(CLR_CMAKE_TARGET_ARCH_I386)
+ set(VM_SOURCES_WKS_ARCH_ASM
+ ${ARCH_SOURCES_DIR}/asmhelpers.S
+ ${ARCH_SOURCES_DIR}/jithelp.S
+ ${ARCH_SOURCES_DIR}/gmsasm.S
+ ${ARCH_SOURCES_DIR}/umthunkstub.S
+ )
elseif(CLR_CMAKE_TARGET_ARCH_ARM)
set(VM_SOURCES_WKS_ARCH_ASM
${ARCH_SOURCES_DIR}/asmhelpers.S
@@ -412,7 +430,6 @@ elseif(CLR_CMAKE_TARGET_ARCH_I386)
)
set(VM_SOURCES_WKS_ARCH
- ${ARCH_SOURCES_DIR}/jithelp.asm
${ARCH_SOURCES_DIR}/jitinterfacex86.cpp
${ARCH_SOURCES_DIR}/profiler.cpp
)
diff --git a/src/vm/amd64/CrtHelpers.asm b/src/vm/amd64/CrtHelpers.asm
index 6ec6e4d2a9..9d5b280558 100644
--- a/src/vm/amd64/CrtHelpers.asm
+++ b/src/vm/amd64/CrtHelpers.asm
@@ -13,48 +13,19 @@
; ***********************************************************************
include AsmMacros.inc
-include asmconstants.inc
-; JIT_MemSet/JIT_MemCpy
-;
-; It is IMPORANT that the exception handling code is able to find these guys
-; on the stack, but to keep them from being tailcalled by VC++ we need to turn
-; off optimization and it ends up being a wasteful implementation.
-;
-; Hence these assembly helpers.
-;
-
-
-;***
-;memset.asm - set a section of memory to all one byte
-;
-; Licensed to the .NET Foundation under one or more agreements.
-; The .NET Foundation licenses this file to you under the MIT license.
-; See the LICENSE file in the project root for more information.;
-;
-;*******************************************************************************
-
-;***
;char *memset(dst, value, count) - sets "count" bytes at "dst" to "value"
;
;Purpose:
; Sets the first "count" bytes of the memory starting
; at "dst" to the character value "value".
;
-; Algorithm:
-; char *
-; memset (dst, value, count)
-; char *dst;
-; char value;
-; unsigned int count;
-; {
-; char *start = dst;
-;
-; while (count--)
-; *dst++ = value;
-; return(start);
-; }
-;
+;Algorithm:
+;Set dst based on count as follows:
+; count [0, 16]: use 1/2/4/8-byte-wide registers
+; count (16, 128]: use 16-byte-wide (XMM) registers without a loop
+; count (128, 512]: use 16-byte-wide (XMM) registers in a loop, unrolled 8 times
+; count (512, upper): use rep stosb
;Entry:
; char *dst - pointer to memory to fill with value
; char value - value to put in dst bytes
@@ -69,460 +40,291 @@ include asmconstants.inc
;
;*******************************************************************************
-CACHE_LIMIT_MEMSET equ 070000h ; limit for nontemporal fill
-
LEAF_ENTRY JIT_MemSet, _TEXT
- mov rax, rcx ; save destination address
- cmp r8, 8 ; check if 8 bytes to fill
- jb short mset40 ; if b, less than 8 bytes to fill
movzx edx, dl ; set fill pattern
- mov r9, 0101010101010101h ; replicate fill over 8 bytes
- imul rdx, r9 ;
- cmp r8, 64 ; check if 64 bytes to fill
- jb short mset20 ; if b, less than 64 bytes
-
-;
-; Large block - fill alignment bytes.
-;
-
-mset00: neg rcx ; compute bytes to alignment
- and ecx, 7 ;
- jz short mset10 ; if z, no alignment required
- sub r8, rcx ; adjust remaining bytes by alignment
- mov [rax], rdx ; fill alignment bytes
-mset10: add rcx, rax ; compute aligned destination address
-
-;
-; Attempt to fill 64-byte blocks
-;
-
- mov r9, r8 ; copy count of bytes remaining
- and r8, 63 ; compute remaining byte count
- shr r9, 6 ; compute number of 64-byte blocks
- test r9, r9 ; remove partial flag stall caused by shr
- jnz short mset70 ; if nz, 64-byte blocks to fill
-
-;
-; Fill 8-byte bytes.
-;
-
-mset20: mov r9, r8 ; copy count of bytes remaining
- and r8, 7 ; compute remaining byte count
- shr r9, 3 ; compute number of 8-byte blocks
- test r9, r9 ; remove partial flag stall caused by shr
- jz short mset40 ; if z, no 8-byte blocks
-
- align ; simpler way to align instrucitons
-
-mset30: mov [rcx], rdx ; fill 8-byte blocks
- add rcx, 8 ; advance to next 8-byte block
- dec r9 ; decrement loop count
- jnz short mset30 ; if nz, more 8-byte blocks
-
-;
-; Fill residual bytes.
-;
-
-mset40: test r8, r8 ; test if any bytes to fill
- jz short mset60 ; if z, no bytes to fill
-mset50: mov [rcx], dl ; fill byte
- inc rcx ; advance to next byte
- dec r8 ; decrement loop count
- jnz short mset50 ; if nz, more bytes to fill
-mset60:
- ; for some reason the assembler doesn't like the REPRET macro on the same line as a label
- REPRET ; return
-
-;
-; Fill 64-byte blocks.
-;
-
- align 16
-
- db 066h, 066h, 066h, 090h
- db 066h, 066h, 090h
-
-mset70: cmp r9, CACHE_LIMIT_MEMSET / 64 ; check if large fill
- jae short mset90 ; if ae, large fill
-mset80: mov [rcx], rdx ; fill 64-byte block
- mov 8[rcx], rdx ;
- mov 16[rcx], rdx ;
- add rcx, 64 ; advance to next block
- mov (24 - 64)[rcx], rdx ;
- mov (32 - 64)[rcx], rdx ;
- dec r9 ; decrement loop count
- mov (40 - 64)[rcx], rdx ;
- mov (48 - 64)[rcx], rdx ;
- mov (56 - 64)[rcx], rdx ;
- jnz short mset80 ; if nz, more 64-byte blocks
- jmp short mset20 ; finish in common code
-
-;
-; Fill 64-byte blocks nontemporal.
-;
-
- align
-
-mset90: movnti [rcx], rdx ; fill 64-byte block
- movnti 8[rcx], rdx ;
- movnti 16[rcx], rdx ;
- add rcx, 64 ; advance to next block
- movnti (24 - 64)[rcx], rdx ;
- movnti (32 - 64)[rcx], rdx ;
- dec r9 ; decrement loop count
- movnti (40 - 64)[rcx], rdx ;
- movnti (48 - 64)[rcx], rdx ;
- movnti (56 - 64)[rcx], rdx ;
- jnz short mset90 ; if nz, move 64-byte blocks
- lock or byte ptr [rsp], 0 ; flush data to memory
- jmp mset20 ; finish in common code
+ mov r9, 0101010101010101h
+ imul rdx, r9 ; rdx is 8 bytes filler
+
+ cmp r8, 16
+ jbe mset04
+
+ cmp r8, 512
+ jbe mset00
+
+ ; count > 512
+ mov r10, rcx ; save dst address
+ mov r11, rdi ; save rdi
+ mov eax, edx ; eax is value
+ mov rdi, rcx ; rdi is dst
+ mov rcx, r8 ; rcx is count
+ rep stosb
+ mov rdi, r11 ; restore rdi
+ mov rax, r10
+ ret
+
+ align 16
+mset00: mov rax, rcx ; save dst address
+ movd xmm0, rdx
+ punpcklbw xmm0, xmm0 ; xmm0 is 16 bytes filler
+
+ cmp r8, 128
+ jbe mset02
+
+ ; count > 128 && count <= 512
+ mov r9, r8
+ shr r9, 7 ; count/128
+
+ align 16
+mset01: movdqu [rcx], xmm0
+ movdqu 16[rcx], xmm0
+ movdqu 32[rcx], xmm0
+ movdqu 48[rcx], xmm0
+ movdqu 64[rcx], xmm0
+ movdqu 80[rcx], xmm0
+ movdqu 96[rcx], xmm0
+ movdqu 112[rcx], xmm0
+ add rcx, 128
+ dec r9
+ jnz mset01
+ and r8, 7fh ; and r8 with 0111 1111
+
+ ; the remainder is from 0 to 127
+ cmp r8, 16
+ jnbe mset02
+
+ ; the remainder <= 16
+ movdqu -16[rcx + r8], xmm0
+ ret
+
+ ; count > 16 && count <= 128 for mset02
+ align 16
+mset02: movdqu [rcx], xmm0
+ movdqu -16[rcx + r8], xmm0
+ cmp r8, 32
+ jbe mset03
+
+ ; count > 32 && count <= 64
+ movdqu 16[rcx], xmm0
+ movdqu -32[rcx + r8], xmm0
+ cmp r8, 64
+ jbe mset03
+
+ ; count > 64 && count <= 128
+ movdqu 32[rcx], xmm0
+ movdqu 48[rcx], xmm0
+ movdqu -48[rcx + r8], xmm0
+ movdqu -64[rcx + r8], xmm0
+mset03: ret
+
+ align 16
+mset04: mov rax, rcx ; save dst address
+ test r8b, 24 ; and r8b with 0001 1000
+ jz mset05
+
+ ; count >= 8 && count <= 16
+ mov [rcx], rdx
+ mov -8[rcx + r8], rdx
+ ret
+
+ align 16
+mset05: test r8b, 4 ; and r8b with 0100
+ jz mset06
+
+ ; count >= 4 && count < 8
+ mov [rcx], edx
+ mov -4[rcx + r8], edx
+ ret
+
+ ; count >= 0 && count < 4
+ align 16
+mset06: test r8b, 1 ; and r8b with 0001
+ jz mset07
+ mov [rcx],dl
+mset07: test r8b, 2 ; and r8b with 0010
+ jz mset08
+ mov -2[rcx + r8], dx
+mset08: ret
LEAF_END_MARKED JIT_MemSet, _TEXT
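
The tiered dispatch above translates naturally into portable code. The following is a
minimal C++ sketch of the same strategy, for illustration only: set16, set_tail128, and
memset_tiered are hypothetical names, and std::memset stands in for the XMM stores and
the rep stosb path.

    #include <cstddef>
    #include <cstring>

    // Stands in for one unaligned 16-byte (movdqu/XMM) store.
    static void set16(unsigned char* p, unsigned char v) { std::memset(p, v, 16); }

    // Overlapping 16-byte stores covering a 1..128 byte tail. When n <= 16 the
    // single store may begin before d, reusing bytes the caller already wrote,
    // exactly as the asm remainder path does with "movdqu -16[rcx + r8], xmm0".
    static void set_tail128(unsigned char* d, unsigned char v, size_t n)
    {
        set16(d + n - 16, v);            // one store ending exactly at the end
        if (n <= 16) return;
        set16(d, v);
        if (n > 32) { set16(d + 16, v); set16(d + n - 32, v); }
        if (n > 64) { set16(d + 32, v); set16(d + 48, v);
                      set16(d + n - 48, v); set16(d + n - 64, v); }
    }

    void* memset_tiered(void* dst, int value, size_t count)
    {
        unsigned char* d = static_cast<unsigned char*>(dst);
        unsigned char v = static_cast<unsigned char>(value);
        if (count == 0) return dst;
        if (count <= 16) {
            // Small: the asm uses paired 1/2/4/8-byte stores; a byte loop is
            // the portable equivalent.
            for (size_t i = 0; i < count; i++) d[i] = v;
        } else if (count <= 512) {
            // Medium/large: 128 bytes per iteration (8 x 16-byte stores),
            // then an overlapping-store tail of at most 128 bytes.
            while (count > 128) {
                for (int i = 0; i < 128; i += 16) set16(d + i, v);
                d += 128; count -= 128;
            }
            set_tail128(d, v, count);
        } else {
            // Huge: the asm defers to the CPU's optimized "rep stosb".
            std::memset(d, v, count);
        }
        return dst;
    }
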
-;*******************************************************************************
-; This ensures that atomic updates of aligned fields will stay atomic.
-;***
;JIT_MemCpy - Copy source buffer to destination buffer
;
;Purpose:
-;JIT_MemCpy - Copy source buffer to destination buffer
-;
-;Purpose:
-; JIT_MemCpy() copies a source memory buffer to a destination memory
-; buffer. This routine recognize overlapping buffers to avoid propogation.
-; For cases where propogation is not a problem, memcpy() can be used.
+; JIT_MemCpy() copies a source memory buffer to a destination memory
+; buffer. This routine recognizes overlapping buffers to avoid propagation.
+; For cases where propagation is not a problem, memcpy() can be used.
+;
+;Algorithm:
+;Copy to destination based on count as follows
+; count [0, 64]: overlap check not needed
+; count [0, 16]: use 1/2/4/8-byte-wide registers
+; count (16, 64]: use 16-byte-wide (XMM) registers without a loop
+; count (64, upper): check overlap
+; non-overlap:
+; count (64, 512]: use 16-byte-wide (XMM) registers in a loop, unrolled 4 times
+; count (512, upper): use rep movsb
+; overlap:
+; use 16-byte-wide (XMM) registers in a loop to copy from end to beginning
;
;Entry:
-; void *dst = pointer to destination buffer
-; const void *src = pointer to source buffer
-; size_t count = number of bytes to copy
+; void *dst = pointer to destination buffer
+; const void *src = pointer to source buffer
+; size_t count = number of bytes to copy
;
;Exit:
-; Returns a pointer to the destination buffer in AX/DX:AX
+; Returns a pointer to the destination buffer
;
;Uses:
-; CX, DX
;
;Exceptions:
;*******************************************************************************
-; This ensures that atomic updates of aligned fields will stay atomic.
-
-CACHE_LIMIT_MEMMOV equ 040000h ; limit for nontemporal fill
-CACHE_BLOCK equ 01000h ; nontemporal move block size
-
LEAF_ENTRY JIT_MemCpy, _TEXT
- mov r11, rcx ; save destination address
- sub rdx, rcx ; compute offset to source buffer
- jb mmov10 ; if b, destination may overlap
- cmp r8, 8 ; check if 8 bytes to move
- jb short mcpy40 ; if b, less than 8 bytes to move
-
-;
-; Move alignment bytes.
-;
-
- test cl, 7 ; test if destination aligned
- jz short mcpy20 ; if z, destination aligned
- test cl, 1 ; test if byte move needed
- jz short mcpy00 ; if z, byte move not needed
- mov al, [rcx + rdx] ; move byte
- dec r8 ; decrement byte count
- mov [rcx], al ;
- inc rcx ; increment destination address
-mcpy00: test cl, 2 ; test if word move needed
- jz short mcpy10 ; if z, word move not needed
- mov ax, [rcx + rdx] ; move word
- sub r8, 2 ; reduce byte count
- mov [rcx], ax ;
- add rcx, 2 ; advance destination address
-mcpy10: test cl, 4 ; test if dword move needed
- jz short mcpy20 ; if z, dword move not needed
- mov eax, [rcx + rdx] ; move dword
- sub r8, 4 ; reduce byte count
- mov [rcx], eax ;
- add rcx, 4 ; advance destination address
-
-;
-; Attempt to move 32-byte blocks.
-;
-
-mcpy20: mov r9, r8 ; copy count of bytes remaining
- shr r9, 5 ; compute number of 32-byte blocks
- test r9, r9 ; v-liti, remove partial flag stall caused by shr
- jnz short mcpy60 ; if nz, 32-byte blocks to fill
-
- align
-;
-; Move 8-byte blocks.
-;
-
-mcpy25: mov r9, r8 ; copy count of bytes remaining
- shr r9, 3 ; compute number of 8-byte blocks
- test r9, r9 ; v-liti, remove partial flag stall caused by shr
- jz short mcpy40 ; if z, no 8-byte blocks
- align
-
-mcpy30: mov rax, [rcx + rdx] ; move 8-byte blocks
- mov [rcx], rax ;
- add rcx, 8 ; advance destination address
- dec r9 ; decrement loop count
- jnz short mcpy30 ; if nz, more 8-byte blocks
- and r8, 7 ; compute remaining byte count
-
-;
-; Test for residual bytes.
-;
-
-mcpy40: test r8, r8 ; test if any bytes to move
- jnz short mcpy50 ; if nz, residual bytes to move
- mov rax, r11 ; set destination address
- ret ;
-
-;
-; Move residual bytes.
-;
-
- align
-
-mcpy50: mov al, [rcx + rdx] ; move byte
- mov [rcx], al ;
- inc rcx ; increment destiantion address
- dec r8 ; decrement loop count
- jnz short mcpy50 ; if nz, more bytes to fill
- mov rax, r11 ; set destination address
- ret ; return
+ mov rax, rcx ; save dst address
+ cmp r8, 16
+ jbe mcpy02
+
+ cmp r8, 64
+ jnbe mcpy07
+
+ ; count > 16 && count <= 64
+ align 16
+mcpy00: movdqu xmm0, [rdx]
+ movdqu xmm1, -16[rdx + r8] ; save 16 to 32 bytes src
+ cmp r8, 32
+ jbe mcpy01
+
+ movdqu xmm2, 16[rdx]
+ movdqu xmm3, -32[rdx + r8] ; save 32 to 64 bytes src
+
+ ;count > 32 && count <= 64
+ movdqu 16[rcx], xmm2
+ movdqu -32[rcx + r8], xmm3
+
+ ;count > 16 && count <= 32
+mcpy01: movdqu [rcx], xmm0
+ movdqu -16[rcx + r8], xmm1
+ ret
+
+ ; count <= 16
+ align 16
+mcpy02: test r8b, 24 ; test count with 0001 1000
+ jz mcpy03
+ ; count >= 8 && count <= 16
+ mov r9, [rdx]
+ mov r10, -8[rdx + r8]
+ mov [rcx], r9
+ mov -8[rcx + r8], r10
+ ret
+
+ align 16
+mcpy03: test r8b, 4 ; test count with 0100
+ jz mcpy04
+ ; count >= 4 && count < 8
+ mov r9d, [rdx]
+ mov r10d, -4[rdx + r8]
+ mov [rcx], r9d
+ mov -4[rcx + r8], r10d
+ ret
+
+ ; count >= 0 && count < 4
+ align 16
+mcpy04: test r8, r8
+ jz mcpy06 ; count == 1/2/3
+ mov r9b, [rdx] ; save the first byte
+
+ test r8b, 2 ; test count with 0010
+ jz mcpy05
+ mov r10w, -2[rdx + r8]
+ mov -2[rcx + r8], r10w
+mcpy05: mov [rcx], r9b
+mcpy06: ret
+
+ align 16
+ ; count > 64, we need to check overlap
+mcpy07: mov r9, rdx ; r9 is src address
+ sub r9, rcx ; if src - dst < 0 jump to mcpy11
+ jb mcpy11 ; if b, destination may overlap
+
+mcpy08: cmp r8, 512
+ jnbe mcpy10
+
+ ; count > 64 && count <= 512
+ mov r9, r8
+ shr r9, 6 ; count/64
+
+ align 16
+mcpy09: movdqu xmm0, [rdx]
+ movdqu xmm1, 16[rdx]
+ movdqu xmm2, 32[rdx]
+ movdqu xmm3, 48[rdx]
+ movdqu [rcx], xmm0
+ movdqu 16[rcx], xmm1
+ movdqu 32[rcx], xmm2
+ movdqu 48[rcx], xmm3
+ add rdx, 64
+ add rcx, 64
+ dec r9
+ jnz mcpy09
+
+ ; the remainder is from 0 to 63
+ and r8, 3fh ; and with 0011 1111
+ cmp r8, 16
+ jnbe mcpy00
+
+ ; the remainder <= 16
+ jmp mcpy02
+ ret
+
+ ; count > 512
+ align 16
+mcpy10: mov r10, rdi ; save rdi
+ mov r11, rsi ; save rsi
+ mov rdi, rcx ; rdi is dst
+ mov rsi, rdx ; rsi is src
+ mov rcx, r8 ; rcx is count
+ rep movsb ; mov from rsi to rdi
+ mov rsi, r11 ; restore rsi
+ mov rdi, r10 ; restore rdi
+ ret
-;
-; Move 32 byte blocks
-;
-
- align 16
-
- db 066h, 066h, 066h, 090h
- db 066h, 066h, 090h
-
-mcpy60: cmp r9, CACHE_LIMIT_MEMMOV / 32 ; check if large move
- jae short mcpy80 ; if ae, large move
-mcpy70: mov rax, [rcx + rdx] ; move 32-byte block
- mov r10, 8[rcx + rdx] ;
- add rcx, 32 ; advance destination address
- mov (-32)[rcx], rax ;
- mov (-24)[rcx], r10 ;
- mov rax, (-16)[rcx + rdx] ;
- mov r10, (-8)[rcx + rdx] ;
- dec r9 ;
- mov (-16)[rcx], rax ;
- mov (-8)[rcx], r10 ;
- jnz short mcpy70 ; if nz, more 32-byte blocks
- and r8, 31 ; compute remaining byte count
- jmp mcpy25 ;
-
-;
-; Move 64-byte blocks nontemporal.
-;
-
- align
-
- db 066h, 090h
-
-mcpy80: cmp rdx, CACHE_BLOCK ; check if cache block spacing
- jb short mcpy70 ; if b, not cache block spaced
-mcpy81: mov eax, CACHE_BLOCK / 128 ; set loop count
-mcpy85: prefetchnta [rcx + rdx] ; prefetch 128 bytes
- prefetchnta 64[rcx + rdx] ;
- add rcx, 128 ; advance source address
- dec eax ; decrement loop count
- jnz short mcpy85 ; if nz, more to prefetch
- sub rcx, CACHE_BLOCK ; reset source address
- mov eax, CACHE_BLOCK / 64 ; set loop count
-mcpy90: mov r9, [rcx + rdx] ; move 64-byte block
- mov r10, 8[rcx + rdx] ;
- movnti [rcx], r9 ;
- movnti 8[rcx], r10 ;
- mov r9, 16[rcx + rdx] ;
- mov r10, 24[rcx + rdx] ;
- movnti 16[rcx], r9 ;
- movnti 24[rcx], r10 ;
- mov r9, 32[rcx + rdx] ;
- mov r10, 40[rcx + rdx] ;
- add rcx, 64 ; advance destination address
- movnti (32 - 64)[rcx], r9 ;
- movnti (40 - 64)[rcx], r10 ;
- mov r9, (48 - 64)[rcx + rdx] ;
- mov r10, (56 - 64)[rcx + rdx] ;
- dec eax ;
- movnti (48 - 64)[rcx], r9 ;
- movnti (56 - 64)[rcx], r10 ;
- jnz short mcpy90 ; if nz, more 32-byte blocks
- sub r8, CACHE_BLOCK ; reduce remaining length
- cmp r8, CACHE_BLOCK ; check if cache block remains
- jae mcpy81 ; if ae, cache block remains
- lock or byte ptr [rsp], 0 ; flush data to memory
- jmp mcpy20 ;
-
-;
; The source address is less than the destination address.
-;
-
- align
-
- db 066h, 066h, 066h, 090h
- db 066h, 066h, 066h, 090h
- db 066h, 090h
-
-mmov10: add rcx, r8 ; compute ending destination address
- cmp r8, 8 ; check if 8 bytes to move
- jb short mmov60 ; if b, less than 8 bytes to move
-
-;
-; Move alignment bytes.
-;
-
- test cl, 7 ; test if destination aligned
- jz short mmov30 ; if z, destination aligned
- test cl, 1 ; test if byte move needed
- jz short mmov15 ; if z, byte move not needed
- dec rcx ; decrement destination address
- mov al, [rcx + rdx] ; move byte
- dec r8 ; decrement byte count
- mov [rcx], al ;
-mmov15: test cl, 2 ; test if word move needed
- jz short mmov20 ; if z, word move not needed
- sub rcx, 2 ; reduce destination address
- mov ax, [rcx + rdx] ; move word
- sub r8, 2 ; reduce byte count
- mov [rcx], ax ;
-mmov20: test cl, 4 ; test if dword move needed
- jz short mmov30 ; if z, dword move not needed
- sub rcx, 4 ; reduce destination address
- mov eax, [rcx + rdx] ; move dword
- sub r8, 4 ; reduce byte count
- mov [rcx], eax ;
-
-;
-; Attempt to move 32-byte blocks
-;
-
-mmov30: mov r9, r8 ; copy count of bytes remaining
- shr r9, 5 ; compute number of 32-byte blocks
- test r9, r9 ; v-liti, remove partial flag stall caused by shr
- jnz short mmov80 ; if nz, 32-byte blocks to fill
-
-;
-; Move 8-byte blocks.
-;
- align
-
-mmov40: mov r9, r8 ; copy count of bytes remaining
- shr r9, 3 ; compute number of 8-byte blocks
- test r9, r9 ; v-liti, remove partial flag stall caused by shr
- jz short mmov60 ; if z, no 8-byte blocks
-
- align
-
-mmov50: sub rcx, 8 ; reduce destination address
- mov rax, [rcx + rdx] ; move 8-byte blocks
- dec r9 ; decrement loop count
- mov [rcx], rax ;
- jnz short mmov50 ; if nz, more 8-byte blocks
- and r8, 7 ; compute remaining byte count
-
-;
-; Test for residual bytes.
-;
-
-mmov60: test r8, r8 ; test if any bytes to move
- jnz short mmov70 ; if nz, residual bytes to move
- mov rax, r11 ; set destination address
- ret ;
-
-;
-; Move residual bytes.
-;
-
- align
-mmov70: dec rcx ; decrement destination address
- mov al, [rcx + rdx] ; move byte
- dec r8 ; decrement loop count
- mov [rcx], al ;
- jnz short mmov70 ; if nz, more bytes to fill
- mov rax, r11 ; set destination address
- ret ; return
-
-;
-; Move 32 byte blocks
-;
-
- align 16
-
- db 066h, 066h, 066h, 090h
- db 066h, 066h, 090h
-
-mmov80: cmp r9, CACHE_LIMIT_MEMMOV / 32 ; check if large move
- jae short mmov93 ; if ae, large move
-mmov90: mov rax, (-8)[rcx + rdx] ; move 32-byte block
- mov r10, (-16)[rcx + rdx] ;
- sub rcx, 32 ; reduce destination address
- mov 24[rcx], rax ;
- mov 16[rcx], r10 ;
- mov rax, 8[rcx + rdx] ;
- mov r10, [rcx + rdx] ;
- dec r9 ;
- mov 8[rcx], rax ;
- mov [rcx], r10 ;
- jnz short mmov90 ; if nz, more 32-byte blocks
- and r8, 31 ; compute remaining byte count
- jmp mmov40 ;
-
-;
-; Move 64-byte blocks nontemporal.
-;
-
- align
-
- db 066h, 090h
-
-mmov93: cmp rdx, -CACHE_BLOCK ; check if cache block spacing
- ja short mmov90 ; if a, not cache block spaced
-mmov94: mov eax, CACHE_BLOCK / 128 ; set loop count
-mmov95: sub rcx, 128 ; reduce destination address
- prefetchnta [rcx + rdx] ; prefetch 128 bytes
- prefetchnta 64[rcx + rdx] ;
- dec eax ; decrement loop count
- jnz short mmov95 ; if nz, more to prefetch
- add rcx, CACHE_BLOCK ; reset source address
- mov eax, CACHE_BLOCK / 64 ; set loop count
-mmov97: mov r9, (-8)[rcx + rdx] ; move 64-byte block
- mov r10, (-16)[rcx + rdx] ;
- movnti (-8)[rcx], r9 ;
- movnti (-16)[rcx], r10 ;
- mov r9, (-24)[rcx + rdx] ;
- mov r10, (-32)[rcx + rdx] ;
- movnti (-24)[rcx], r9 ;
- movnti (-32)[rcx], r10 ;
- mov r9, (-40)[rcx + rdx] ;
- mov r10, (-48)[rcx + rdx] ;
- sub rcx, 64 ; reduce destination address
- movnti (64 - 40)[rcx], r9 ;
- movnti (64 - 48)[rcx], r10 ;
- mov r9, (64 - 56)[rcx + rdx] ;
- mov r10, (64 - 64)[rcx + rdx] ;
- dec eax ; decrement loop count
- movnti (64 - 56)[rcx], r9 ;
- movnti (64 - 64)[rcx], r10 ;
- jnz short mmov97 ; if nz, more 32-byte blocks
- sub r8, CACHE_BLOCK ; reduce remaining length
- cmp r8, CACHE_BLOCK ; check if cache block remains
- jae mmov94 ; if ae, cache block remains
- lock or byte ptr [rsp], 0 ; flush data to memory
- jmp mmov30 ;
+ align 16
+mcpy11: add r9, r8 ; src - dst + count
+ cmp r9, 0 ; if src + count <= dst, jump to mcpy08
+ jle mcpy08
+
+ lea r9, [rdx + r8] ; r9 is the src + count
+ lea r10, [rcx + r8] ; r10 is the dst + count
+
+ mov r11, r8
+ shr r11, 6 ; count/64
+
+ ; count > 64
+ align 16
+mcpy12: movdqu xmm0, -16[r9]
+ movdqu xmm1, -32[r9]
+ movdqu xmm2, -48[r9]
+ movdqu xmm3, -64[r9]
+ movdqu -16[r10], xmm0
+ movdqu -32[r10], xmm1
+ movdqu -48[r10], xmm2
+ movdqu -64[r10], xmm3
+ sub r9, 64
+ sub r10, 64
+ dec r11
+ jnz mcpy12
+
+ ; the remainder is from 0 to 63
+ and r8, 3fh ; and with 0011 1111
+ cmp r8, 16
+ jnbe mcpy00
+
+ ; the remainder <= 16
+ jmp mcpy02
LEAF_END_MARKED JIT_MemCpy, _TEXT
-
-
- end
-
+ end
\ No newline at end of file
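
The copy side hinges on the overlap test at mcpy07/mcpy11: copy forward unless the
destination starts inside the source range, and in that case walk backward from the end
so source bytes are read before they are overwritten. Below is a minimal C++ sketch of
that decision, for illustration only: copy16 and memcpy_overlap_aware are hypothetical
names, and std::memmove stands in for the short overlapping-load paths and rep movsb.

    #include <cstddef>
    #include <cstring>

    // Stands in for one unaligned 16-byte (movdqu/XMM) load+store pair.
    static void copy16(unsigned char* d, const unsigned char* s) { std::memcpy(d, s, 16); }

    void* memcpy_overlap_aware(void* dst, const void* src, size_t count)
    {
        unsigned char* d = static_cast<unsigned char*>(dst);
        const unsigned char* s = static_cast<const unsigned char*>(src);
        if (count <= 64) {
            // Short copies load everything before storing anything, so no
            // explicit overlap check is needed (mcpy00..mcpy06).
            std::memmove(d, s, count);
        } else if (s >= d || s + count <= d) {
            // No harmful overlap: forward copy, 64 bytes per iteration
            // (the asm switches to "rep movsb" once count exceeds 512).
            while (count > 64) {
                for (int i = 0; i < 64; i += 16) copy16(d + i, s + i);
                d += 64; s += 64; count -= 64;
            }
            std::memmove(d, s, count);   // 0..64-byte tail
        } else {
            // src < dst < src + count: copy 64-byte blocks backward from
            // the end (mcpy11/mcpy12) so reads stay ahead of writes.
            const unsigned char* se = s + count;
            unsigned char* de = d + count;
            while (count > 64) {
                for (int i = 16; i <= 64; i += 16) copy16(de - i, se - i);
                de -= 64; se -= 64; count -= 64;
            }
            std::memmove(d, s, count);   // remaining head, still unclobbered
        }
        return dst;
    }
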
diff --git a/src/vm/amd64/asmconstants.h b/src/vm/amd64/asmconstants.h
index 32b23c83c3..ad90dd17ad 100644
--- a/src/vm/amd64/asmconstants.h
+++ b/src/vm/amd64/asmconstants.h
@@ -164,10 +164,10 @@ ASMCONSTANTS_C_ASSERT(OFFSETOF__Thread__m_ThreadId
== offsetof(Thread, m_ThreadId));
#define OFFSET__Thread__m_alloc_context__alloc_ptr 0x60
-ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr));
#define OFFSET__Thread__m_alloc_context__alloc_limit 0x68
-ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+ASMCONSTANTS_C_ASSERT(OFFSET__Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_limit));
#define OFFSETOF__ThreadExceptionState__m_pCurrentTracker 0x000
ASMCONSTANTS_C_ASSERT(OFFSETOF__ThreadExceptionState__m_pCurrentTracker
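
These ASMCONSTANTS_C_ASSERT lines are what keep the hand-maintained hex offsets honest:
the build breaks if the C++ layout ever drifts, which is why the alloc_context ->
gc_alloc_context rename has to touch them even though no offset value changed. A minimal
sketch of the pattern, using hypothetical stand-in types rather than the real Thread
layout:

    #include <cstddef>

    // Tiny stand-ins for gc_alloc_context and Thread; the real structures
    // are much larger, and the real constants live in asmconstants.h.
    struct AllocContextSketch { char* alloc_ptr; char* alloc_limit; };
    struct ThreadSketch { void* state; AllocContextSketch m_alloc_context; };

    // The constant is written down by hand for the assembly code...
    #define SKETCH_OFFSET__alloc_ptr (sizeof(void*))

    // ...and the assert pins it to the actual C++ layout.
    static_assert(SKETCH_OFFSET__alloc_ptr ==
                      offsetof(ThreadSketch, m_alloc_context) +
                      offsetof(AllocContextSketch, alloc_ptr),
                  "asm constant out of sync with C++ layout");
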
diff --git a/src/vm/amd64/cgencpu.h b/src/vm/amd64/cgencpu.h
index 258ac38915..769f4029ee 100644
--- a/src/vm/amd64/cgencpu.h
+++ b/src/vm/amd64/cgencpu.h
@@ -65,14 +65,16 @@ EXTERN_C void FastCallFinalizeWorker(Object *obj, PCODE funcPtr);
#define CACHE_LINE_SIZE 64 // Current AMD64 processors have 64-byte cache lines as per AMD64 optmization manual
#define LOG2SLOT LOG2_PTRSIZE
-#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 8 // bytes
-#define ENREGISTERED_PARAMTYPE_MAXSIZE 8 // bytes
#ifdef UNIX_AMD64_ABI
+#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 16 // bytes
+#define ENREGISTERED_PARAMTYPE_MAXSIZE 16 // bytes
#define ENREGISTERED_RETURNTYPE_MAXSIZE 16 // bytes
#define CALLDESCR_ARGREGS 1 // CallDescrWorker has ArgumentRegister parameter
#define CALLDESCR_FPARGREGS 1 // CallDescrWorker has FloatArgumentRegisters parameter
#else
+#define ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE 8 // bytes
+#define ENREGISTERED_PARAMTYPE_MAXSIZE 8 // bytes
#define ENREGISTERED_RETURNTYPE_MAXSIZE 8 // bytes
#define COM_STUBS_SEPARATE_FP_LOCATIONS
#define CALLDESCR_REGTYPEMAP 1
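
The Unix values reflect the System V AMD64 ABI, where an aggregate of up to 16 bytes is
classified into two eightbytes and passed or returned in a register pair, whereas the
Windows x64 convention passes anything larger than 8 bytes by reference. A small
illustrative example (Pair, make_pair, and sum are hypothetical names):

    #include <cstdint>

    // 16-byte POD aggregate: two INTEGER eightbytes under System V.
    struct Pair { int64_t lo; int64_t hi; };

    Pair make_pair(int64_t a, int64_t b)
    {
        // System V: returned in rax:rdx, matching the 16-byte
        // ENREGISTERED_RETURNTYPE_INTEGER_MAXSIZE above; Windows x64
        // would return it through a hidden pointer instead.
        return Pair{ a, b };
    }

    int64_t sum(Pair p)
    {
        // System V: p arrives in two registers (rdi:rsi), so no stack
        // copy is needed; Windows x64 would pass a pointer to a copy.
        return p.lo + p.hi;
    }
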
diff --git a/src/vm/amd64/excepamd64.cpp b/src/vm/amd64/excepamd64.cpp
index 2fc553a987..d4248e7b07 100644
--- a/src/vm/amd64/excepamd64.cpp
+++ b/src/vm/amd64/excepamd64.cpp
@@ -21,7 +21,7 @@
#include "comutilnative.h"
#include "sigformat.h"
#include "siginfo.hpp"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow
#include "perfcounters.h"
#include "asmconstants.h"
diff --git a/src/vm/amd64/jitinterfaceamd64.cpp b/src/vm/amd64/jitinterfaceamd64.cpp
index 39c2e05c2f..d5dec8e6e8 100644
--- a/src/vm/amd64/jitinterfaceamd64.cpp
+++ b/src/vm/amd64/jitinterfaceamd64.cpp
@@ -390,7 +390,7 @@ bool WriteBarrierManager::NeedDifferentWriteBarrier(bool bReqUpperBoundsCheck, W
}
#endif
- writeBarrierType = GCHeap::IsServerHeap() ? WRITE_BARRIER_SVR64 : WRITE_BARRIER_PREGROW64;
+ writeBarrierType = GCHeapUtilities::IsServerHeap() ? WRITE_BARRIER_SVR64 : WRITE_BARRIER_PREGROW64;
continue;
case WRITE_BARRIER_PREGROW64:
diff --git a/src/vm/amd64/umthunkstub.S b/src/vm/amd64/umthunkstub.S
index e388f15490..3e60bedb3f 100644
--- a/src/vm/amd64/umthunkstub.S
+++ b/src/vm/amd64/umthunkstub.S
@@ -83,7 +83,7 @@ LOCAL_LABEL(HaveThread):
mov r12, rax // r12 <- Thread*
- //FailFast if a native callable method invoked via ldftn and calli.
+ //FailFast if a native callable method is invoked via ldftn and calli.
cmp dword ptr [r12 + OFFSETOF__Thread__m_fPreemptiveGCDisabled], 1
jz LOCAL_LABEL(InvalidTransition)
diff --git a/src/vm/appdomain.cpp b/src/vm/appdomain.cpp
index 0ec2c5f2fc..34da344c94 100644
--- a/src/vm/appdomain.cpp
+++ b/src/vm/appdomain.cpp
@@ -12,7 +12,7 @@
#include "strongnameinternal.h"
#include "excep.h"
#include "eeconfig.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "eventtrace.h"
#ifdef FEATURE_FUSION
#include "assemblysink.h"
@@ -2652,8 +2652,8 @@ void AppDomain::CreateADUnloadStartEvent()
// If the thread is in cooperative mode, it must have been suspended for the GC so a delete
// can't happen.
- _ASSERTE(GCHeap::IsGCInProgress() &&
- GCHeap::IsServerHeap() &&
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
+ GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
SystemDomain* sysDomain = SystemDomain::System();
@@ -2691,7 +2691,7 @@ void SystemDomain::ResetADSurvivedBytes()
}
CONTRACT_END;
- _ASSERTE(GCHeap::IsGCInProgress());
+ _ASSERTE(GCHeapUtilities::IsGCInProgress());
SystemDomain* sysDomain = SystemDomain::System();
if (sysDomain)
@@ -2870,6 +2870,11 @@ void SystemDomain::LoadBaseSystemClasses()
// the SZArrayHelper class here.
g_pSZArrayHelperClass = MscorlibBinder::GetClass(CLASS__SZARRAYHELPER);
+#ifdef FEATURE_SPAN_OF_T
+ // Load ByReference class
+ g_pByReferenceClass = MscorlibBinder::GetClass(CLASS__BYREFERENCE);
+#endif
+
// Load Nullable class
g_pNullableClass = MscorlibBinder::GetClass(CLASS__NULLABLE);
@@ -2943,6 +2948,7 @@ void SystemDomain::LoadBaseSystemClasses()
g_pExecutionEngineExceptionClass = MscorlibBinder::GetException(kExecutionEngineException);
g_pThreadAbortExceptionClass = MscorlibBinder::GetException(kThreadAbortException);
+#ifdef FEATURE_CER
// Used for determining whether a class has a critical finalizer
// To determine whether a class has a critical finalizer, we
// currently will simply see if it's parent class has a critical
@@ -2951,6 +2957,7 @@ void SystemDomain::LoadBaseSystemClasses()
// here.
g_pCriticalFinalizerObjectClass = MscorlibBinder::GetClass(CLASS__CRITICAL_FINALIZER_OBJECT);
_ASSERTE(g_pCriticalFinalizerObjectClass->HasCriticalFinalizer());
+#endif
// used by gc to handle predefined agility checking
g_pThreadClass = MscorlibBinder::GetClass(CLASS__THREAD);
@@ -2980,7 +2987,9 @@ void SystemDomain::LoadBaseSystemClasses()
// Load a special marker method used to detect Constrained Execution Regions
// at jit time.
+#ifdef FEATURE_CER
g_pPrepareConstrainedRegionsMethod = MscorlibBinder::GetMethod(METHOD__RUNTIME_HELPERS__PREPARE_CONSTRAINED_REGIONS);
+#endif
g_pExecuteBackoutCodeHelperMethod = MscorlibBinder::GetMethod(METHOD__RUNTIME_HELPERS__EXECUTE_BACKOUT_CODE_HELPER);
// Make sure that FCall mapping for Monitor.Enter is initialized. We need it in case Monitor.Enter is used only as JIT helper.
@@ -3824,7 +3833,7 @@ HRESULT SystemDomain::RunDllMain(HINSTANCE hInst, DWORD dwReason, LPVOID lpReser
return S_OK;
// ExitProcess is called while a thread is doing GC.
- if (dwReason == DLL_PROCESS_DETACH && GCHeap::IsGCInProgress())
+ if (dwReason == DLL_PROCESS_DETACH && GCHeapUtilities::IsGCInProgress())
return S_OK;
// ExitProcess is called on a thread that we don't know about
@@ -5107,7 +5116,7 @@ void AppDomain::Init()
// Ref_CreateHandleTableBucket, this is because AD::Init() can race with GC
// and once we add ourselves to the handle table map the GC can start walking
// our handles and calling AD::RecordSurvivedBytes() which touches ARM data.
- if (GCHeap::IsServerHeap())
+ if (GCHeapUtilities::IsServerHeap())
m_dwNumHeaps = CPUGroupInfo::CanEnableGCCPUGroups() ?
CPUGroupInfo::GetNumActiveProcessors() :
GetCurrentProcessCpuCount();
@@ -8084,6 +8093,13 @@ BOOL AppDomain::IsCached(AssemblySpec *pSpec)
return m_AssemblyCache.Contains(pSpec);
}
+#ifdef FEATURE_CORECLR
+void AppDomain::GetCacheAssemblyList(SetSHash<PTR_DomainAssembly>& assemblyList)
+{
+ CrstHolder holder(&m_DomainCacheCrst);
+ m_AssemblyCache.GetAllAssemblies(assemblyList);
+}
+#endif
PEAssembly* AppDomain::FindCachedFile(AssemblySpec* pSpec, BOOL fThrow /*=TRUE*/)
{
@@ -8241,7 +8257,7 @@ public:
}
else
{
- IfFailRet(FString::Utf8_Unicode(szName, bIsAscii, wzBuffer, cchBuffer));
+ IfFailRet(FString::Utf8_Unicode(szName, bIsAscii, wzBuffer, cchName));
if (pcchBuffer != nullptr)
{
*pcchBuffer = cchName;
@@ -11110,7 +11126,7 @@ void AppDomain::Unload(BOOL fForceUnload)
}
if(bForceGC)
{
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
FinalizerThread::FinalizerThreadWait();
SetStage(STAGE_COLLECTED);
Close(); //NOTHROW!
@@ -11128,14 +11144,14 @@ void AppDomain::Unload(BOOL fForceUnload)
if (takeSnapShot)
{
char buffer[1024];
- sprintf(buffer, "vadump -p %d -o > vadump.%d", GetCurrentProcessId(), unloadCount);
+ sprintf_s(buffer, _countof(buffer), "vadump -p %d -o > vadump.%d", GetCurrentProcessId(), unloadCount);
system(buffer);
- sprintf(buffer, "umdh -p:%d -d -i:1 -f:umdh.%d", GetCurrentProcessId(), unloadCount);
+ sprintf_s(buffer, _countof(buffer), "umdh -p:%d -d -i:1 -f:umdh.%d", GetCurrentProcessId(), unloadCount);
system(buffer);
int takeDHSnapShot = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ADTakeDHSnapShot);
if (takeDHSnapShot)
{
- sprintf(buffer, "dh -p %d -s -g -h -b -f dh.%d", GetCurrentProcessId(), unloadCount);
+ sprintf_s(buffer, _countof(buffer), "dh -p %d -s -g -h -b -f dh.%d", GetCurrentProcessId(), unloadCount);
system(buffer);
}
}
@@ -11146,7 +11162,7 @@ void AppDomain::Unload(BOOL fForceUnload)
{
// do extra finalizer wait to remove any leftover sb entries
FinalizerThread::FinalizerThreadWait();
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
FinalizerThread::FinalizerThreadWait();
LogSpewAlways("Done unload %3.3d\n", unloadCount);
DumpSyncBlockCache();
@@ -11548,7 +11564,7 @@ void AppDomain::ClearGCHandles()
SetStage(STAGE_HANDLETABLE_NOACCESS);
- GCHeap::GetGCHeap()->WaitUntilConcurrentGCComplete();
+ GCHeapUtilities::GetGCHeap()->WaitUntilConcurrentGCComplete();
// Keep async pin handles alive by moving them to default domain
HandleAsyncPinHandles();
@@ -12567,11 +12583,13 @@ AppDomain::RaiseAssemblyResolveEvent(
{
if (pSpec->GetParentAssembly() != NULL)
{
+#ifndef FEATURE_CORECLR
if ( pSpec->IsIntrospectionOnly()
#ifdef FEATURE_FUSION
|| pSpec->GetParentLoadContext() == LOADCTX_TYPE_UNKNOWN
#endif
)
+#endif // FEATURE_CORECLR
{
gc.AssemblyRef=pSpec->GetParentAssembly()->GetExposedAssemblyObject();
}
@@ -13516,8 +13534,8 @@ void SystemDomain::ProcessDelayedUnloadDomains()
}
CONTRACTL_END;
- int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
- if (GCHeap::GetGCHeap()->IsConcurrentGCInProgress())
+ int iGCRefPoint=GCHeapUtilities::GetGCHeap()->CollectionCount(GCHeapUtilities::GetGCHeap()->GetMaxGeneration());
+ if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress())
iGCRefPoint--;
BOOL bAppDomainToCleanup = FALSE;
@@ -13695,7 +13713,6 @@ ULONG ADUnloadSink::Release()
if (ulRef == 0)
{
delete this;
- return 0;
}
return ulRef;
};
@@ -13735,8 +13752,8 @@ void AppDomain::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
}
CONTRACT_END;
- _ASSERTE(GCHeap::IsGCInProgress() &&
- GCHeap::IsServerHeap() &&
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
+ GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
AppDomain::AssemblyIterator asmIterator = IterateAssembliesEx((AssemblyIterationFlags)(kIncludeLoaded | kIncludeExecution));
diff --git a/src/vm/appdomain.hpp b/src/vm/appdomain.hpp
index 97e8438329..670f685fdc 100644
--- a/src/vm/appdomain.hpp
+++ b/src/vm/appdomain.hpp
@@ -30,6 +30,7 @@
#include "fptrstubs.h"
#include "ilstubcache.h"
#include "testhookmgr.h"
+#include "gcheaputilities.h"
#ifdef FEATURE_VERSIONING
#include "../binder/inc/applicationcontext.hpp"
#endif // FEATURE_VERSIONING
@@ -1295,7 +1296,7 @@ public:
{
WRAPPER_NO_CONTRACT;
OBJECTHANDLE h = ::CreateSizedRefHandle(
- m_hHandleTableBucket->pTable[GCHeap::IsServerHeap() ? (m_dwSizedRefHandles % m_iNumberOfProcessors) : GetCurrentThreadHomeHeapNumber()],
+ m_hHandleTableBucket->pTable[GCHeapUtilities::IsServerHeap() ? (m_dwSizedRefHandles % m_iNumberOfProcessors) : GetCurrentThreadHomeHeapNumber()],
object);
InterlockedIncrement((LONG*)&m_dwSizedRefHandles);
return h;
@@ -2430,6 +2431,9 @@ public:
LPCWSTR wszCodeBase);
#ifndef DACCESS_COMPILE // needs AssemblySpec
+
+ void GetCacheAssemblyList(SetSHash<PTR_DomainAssembly>& assemblyList);
+
//****************************************************************************************
// Returns and Inserts assemblies into a lookup cache based on the binding information
// in the AssemblySpec. There can be many AssemblySpecs to a single assembly.
@@ -4533,8 +4537,8 @@ public:
if (m_UnloadIsAsync)
{
pDomain->AddRef();
- int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
- if (GCHeap::GetGCHeap()->IsGCInProgress())
+ int iGCRefPoint=GCHeapUtilities::GetGCHeap()->CollectionCount(GCHeapUtilities::GetGCHeap()->GetMaxGeneration());
+ if (GCHeapUtilities::IsGCInProgress())
iGCRefPoint++;
pDomain->SetGCRefPoint(iGCRefPoint);
}
@@ -4554,8 +4558,8 @@ public:
pAllocator->m_pLoaderAllocatorDestroyNext=m_pDelayedUnloadListOfLoaderAllocators;
m_pDelayedUnloadListOfLoaderAllocators=pAllocator;
- int iGCRefPoint=GCHeap::GetGCHeap()->CollectionCount(GCHeap::GetGCHeap()->GetMaxGeneration());
- if (GCHeap::GetGCHeap()->IsGCInProgress())
+ int iGCRefPoint=GCHeapUtilities::GetGCHeap()->CollectionCount(GCHeapUtilities::GetGCHeap()->GetMaxGeneration());
+ if (GCHeapUtilities::IsGCInProgress())
iGCRefPoint++;
pAllocator->SetGCRefPoint(iGCRefPoint);
}
diff --git a/src/vm/arm/asmconstants.h b/src/vm/arm/asmconstants.h
index 47ebb2d24d..93af04734e 100644
--- a/src/vm/arm/asmconstants.h
+++ b/src/vm/arm/asmconstants.h
@@ -225,10 +225,10 @@ ASMCONSTANTS_C_ASSERT(UnmanagedToManagedFrame__m_pvDatum == offsetof(UnmanagedTo
#ifndef CROSSGEN_COMPILE
#define Thread__m_alloc_context__alloc_limit 0x44
-ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_limit == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_limit));
#define Thread__m_alloc_context__alloc_ptr 0x40
-ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ASMCONSTANTS_C_ASSERT(Thread__m_alloc_context__alloc_ptr == offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr));
#endif // CROSSGEN_COMPILE
#define Thread__m_fPreemptiveGCDisabled 0x08
diff --git a/src/vm/arm/cgencpu.h b/src/vm/arm/cgencpu.h
index 936fdabafb..63c578bb88 100644
--- a/src/vm/arm/cgencpu.h
+++ b/src/vm/arm/cgencpu.h
@@ -36,10 +36,12 @@ Stub * GenerateInitPInvokeFrameHelper();
EXTERN_C void checkStack(void);
+#define THUMB_CODE 1
+
#ifdef CROSSGEN_COMPILE
#define GetEEFuncEntryPoint(pfn) 0x1001
#else
-#define GetEEFuncEntryPoint(pfn) GFN_TADDR(pfn)
+#define GetEEFuncEntryPoint(pfn) (GFN_TADDR(pfn) | THUMB_CODE)
#endif
//**********************************************************************
@@ -306,8 +308,6 @@ inline PCODE decodeBackToBackJump(PCODE pBuffer)
#include "stublink.h"
struct ArrayOpScript;
-#define THUMB_CODE 1
-
inline BOOL IsThumbCode(PCODE pCode)
{
return (pCode & THUMB_CODE) != 0;
diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
index 0b069da47e..1309695f73 100644
--- a/src/vm/arm/stubs.cpp
+++ b/src/vm/arm/stubs.cpp
@@ -2650,7 +2650,7 @@ void InitJITHelpers1()
))
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// If the TLS for Thread is low enough use the super-fast helpers
if (gThreadTLSIndex < TLS_MINIMUM_AVAILABLE)
{
diff --git a/src/vm/assemblynative.cpp b/src/vm/assemblynative.cpp
index 90c58fc59c..f372bcb349 100644
--- a/src/vm/assemblynative.cpp
+++ b/src/vm/assemblynative.cpp
@@ -807,6 +807,41 @@ Assembly* AssemblyNative::LoadFromPEImage(ICLRPrivBinder* pBinderContext, PEImag
RETURN pLoadedAssembly;
}
+/* static */
+void QCALLTYPE AssemblyNative::GetLoadedAssembliesInternal(QCall::ObjectHandleOnStack assemblies)
+{
+ QCALL_CONTRACT;
+
+ BEGIN_QCALL;
+
+ MethodTable * pAssemblyClass = MscorlibBinder::GetClass(CLASS__ASSEMBLY);
+
+ PTR_AppDomain pCurDomain = GetAppDomain();
+
+ SetSHash<PTR_DomainAssembly> assemblySet;
+ pCurDomain->GetCacheAssemblyList(assemblySet);
+ size_t nArrayElems = assemblySet.GetCount();
+ PTRARRAYREF AsmArray = NULL;
+
+ GCX_COOP();
+
+ GCPROTECT_BEGIN(AsmArray);
+ AsmArray = (PTRARRAYREF) AllocateObjectArray( (DWORD)nArrayElems, pAssemblyClass);
+ for(auto it = assemblySet.Begin(); it != assemblySet.End(); it++)
+ {
+ PTR_DomainAssembly assem = *it;
+ OBJECTREF o = (OBJECTREF)assem->GetExposedAssemblyObject();
+ _ASSERTE(o != NULL);
+ _ASSERTE(nArrayElems > 0);
+ AsmArray->SetAt(--nArrayElems, o);
+ }
+
+ assemblies.Set(AsmArray);
+
+ GCPROTECT_END();
+
+ END_QCALL;
+}
/* static */
void QCALLTYPE AssemblyNative::LoadFromPath(INT_PTR ptrNativeAssemblyLoadContext, LPCWSTR pwzILPath, LPCWSTR pwzNIPath, QCall::ObjectHandleOnStack retLoadedAssembly)
diff --git a/src/vm/assemblynative.hpp b/src/vm/assemblynative.hpp
index ca03239d3e..99f51e9837 100644
--- a/src/vm/assemblynative.hpp
+++ b/src/vm/assemblynative.hpp
@@ -252,6 +252,7 @@ public:
static BOOL QCALLTYPE OverrideDefaultAssemblyLoadContextForCurrentDomain(INT_PTR ptrNativeAssemblyLoadContext);
static BOOL QCALLTYPE CanUseAppPathAssemblyLoadContextInCurrentDomain();
static void QCALLTYPE LoadFromPath(INT_PTR ptrNativeAssemblyLoadContext, LPCWSTR pwzILPath, LPCWSTR pwzNIPath, QCall::ObjectHandleOnStack retLoadedAssembly);
+ static void QCALLTYPE GetLoadedAssembliesInternal(QCall::ObjectHandleOnStack assemblies);
static INT_PTR QCALLTYPE InternalLoadUnmanagedDllFromPath(LPCWSTR unmanagedLibraryPath);
static void QCALLTYPE LoadFromStream(INT_PTR ptrNativeAssemblyLoadContext, INT_PTR ptrAssemblyArray, INT32 cbAssemblyArrayLength, INT_PTR ptrSymbolArray, INT32 cbSymbolArrayLength, QCall::ObjectHandleOnStack retLoadedAssembly);
static Assembly* LoadFromPEImage(ICLRPrivBinder* pBinderContext, PEImage *pILImage, PEImage *pNIImage);
diff --git a/src/vm/assemblyspec.hpp b/src/vm/assemblyspec.hpp
index a7e9c0f203..d94a847124 100644
--- a/src/vm/assemblyspec.hpp
+++ b/src/vm/assemblyspec.hpp
@@ -682,6 +682,20 @@ class AssemblySpecBindingCache
WRAPPER_NO_CONTRACT;
return pSpec->Hash();
}
+
+#if defined(FEATURE_CORECLR) && !defined(DACCESS_COMPILE)
+ void GetAllAssemblies(SetSHash<PTR_DomainAssembly>& assemblyList)
+ {
+ PtrHashMap::PtrIterator i = m_map.begin();
+ while (!i.end())
+ {
+ AssemblyBinding *b = (AssemblyBinding*) i.GetValue();
+ if(!b->IsError() && b->GetAssembly() != NULL)
+ assemblyList.AddOrReplace(b->GetAssembly());
+ ++i;
+ }
+ }
+#endif // defined(FEATURE_CORECLR) && !defined(DACCESS_COMPILE)
static BOOL CompareSpecs(UPTR u1, UPTR u2);
};
diff --git a/src/vm/callhelpers.cpp b/src/vm/callhelpers.cpp
index 9152f71d79..addd5192da 100644
--- a/src/vm/callhelpers.cpp
+++ b/src/vm/callhelpers.cpp
@@ -16,6 +16,8 @@
// To include declaration of "SignatureNative"
#include "runtimehandles.h"
+#include "invokeutil.h"
+#include "argdestination.h"
#if defined(FEATURE_MULTICOREJIT) && defined(_DEBUG)
@@ -536,7 +538,7 @@ void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT *
}
#endif
- int ofs;
+ int ofs;
for (; TransitionBlock::InvalidOffset != (ofs = m_argIt.GetNextOffset()); arg++)
{
#ifdef CALLDESCR_REGTYPEMAP
@@ -567,46 +569,56 @@ void MethodDescCallSite::CallTargetWorker(const ARG_SLOT *pArguments, ARG_SLOT *
}
#endif // CHECK_APP_DOMAIN_LEAKS
-#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
- _ASSERTE(ofs != TransitionBlock::StructInRegsOffset);
-#endif
- PVOID pDest = pTransitionBlock + ofs;
+ ArgDestination argDest(pTransitionBlock, ofs, m_argIt.GetArgLocDescForStructInRegs());
UINT32 stackSize = m_argIt.GetArgSize();
- switch (stackSize)
+ // We need to pass in a pointer, but be careful of the ARG_SLOT calling convention. We might already have a pointer in the ARG_SLOT.
+ PVOID pSrc = stackSize > sizeof(ARG_SLOT) ? (LPVOID)ArgSlotToPtr(pArguments[arg]) : (LPVOID)ArgSlotEndianessFixup((ARG_SLOT*)&pArguments[arg], stackSize);
+
+#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
+ if (argDest.IsStructPassedInRegs())
+ {
+ TypeHandle th;
+ m_argIt.GetArgType(&th);
+
+ argDest.CopyStructToRegisters(pSrc, th.AsMethodTable()->GetNumInstanceFieldBytes(), 0);
+ }
+ else
+#endif // UNIX_AMD64_ABI && FEATURE_UNIX_AMD64_STRUCT_PASSING
{
- case 1:
- case 2:
- case 4:
- *((INT32*)pDest) = (INT32)pArguments[arg];
- break;
-
- case 8:
- *((INT64*)pDest) = pArguments[arg];
- break;
-
- default:
- // The ARG_SLOT contains a pointer to the value-type
-#ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
- if (m_argIt.IsArgPassedByRef())
- {
- // We need to pass in a pointer, but be careful of the ARG_SLOT calling convention.
- // We might already have a pointer in the ARG_SLOT
- *(PVOID*)pDest = stackSize>sizeof(ARG_SLOT) ?
- (LPVOID)ArgSlotToPtr(pArguments[arg]) :
- (LPVOID)ArgSlotEndianessFixup((ARG_SLOT*)&pArguments[arg], stackSize);
- }
- else
-#endif // ENREGISTERED_PARAMTYPE_MAXSIZE
- if (stackSize>sizeof(ARG_SLOT))
- {
- CopyMemory(pDest, ArgSlotToPtr(pArguments[arg]), stackSize);
- }
- else
- {
- CopyMemory(pDest, (LPVOID) (&pArguments[arg]), stackSize);
- }
- break;
+ PVOID pDest = argDest.GetDestinationAddress();
+
+ switch (stackSize)
+ {
+ case 1:
+ case 2:
+ case 4:
+ *((INT32*)pDest) = (INT32)pArguments[arg];
+ break;
+
+ case 8:
+ *((INT64*)pDest) = pArguments[arg];
+ break;
+
+ default:
+ // The ARG_SLOT contains a pointer to the value-type
+ #ifdef ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (m_argIt.IsArgPassedByRef())
+ {
+ *(PVOID*)pDest = pSrc;
+ }
+ else
+ #endif // ENREGISTERED_PARAMTYPE_MAXSIZE
+ if (stackSize > sizeof(ARG_SLOT))
+ {
+ CopyMemory(pDest, ArgSlotToPtr(pArguments[arg]), stackSize);
+ }
+ else
+ {
+ CopyMemory(pDest, (LPVOID) (&pArguments[arg]), stackSize);
+ }
+ break;
+ }
}
}
diff --git a/src/vm/callingconvention.h b/src/vm/callingconvention.h
index 3ef6be983a..c9a27c2371 100644
--- a/src/vm/callingconvention.h
+++ b/src/vm/callingconvention.h
@@ -1341,11 +1341,15 @@ void ArgIteratorTemplate<ARGITERATOR_BASE>::ComputeReturnFlags()
break;
case ELEMENT_TYPE_R4:
+#ifndef ARM_SOFTFP
flags |= sizeof(float) << RETURN_FP_SIZE_SHIFT;
+#endif
break;
case ELEMENT_TYPE_R8:
+#ifndef ARM_SOFTFP
flags |= sizeof(double) << RETURN_FP_SIZE_SHIFT;
+#endif
break;
case ELEMENT_TYPE_VALUETYPE:
diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
index 464c147cb4..c95fbac16e 100644
--- a/src/vm/ceeload.cpp
+++ b/src/vm/ceeload.cpp
@@ -1656,6 +1656,7 @@ void Module::Destruct()
m_InstMethodHashTableCrst.Destroy();
m_ISymUnmanagedReaderCrst.Destroy();
+#ifdef FEATURE_CER
if (m_pCerPrepInfo)
{
_ASSERTE(m_pCerCrst != NULL);
@@ -1672,6 +1673,7 @@ void Module::Destruct()
}
if (m_pCerCrst)
delete m_pCerCrst;
+#endif // FEATURE_CER
if (m_debuggerSpecificData.m_pDynamicILCrst)
{
@@ -1702,8 +1704,10 @@ void Module::Destruct()
}
#ifdef FEATURE_PREJIT
+#ifdef FEATURE_CER
if (m_pCerNgenRootTable && (m_dwTransientFlags & M_CER_ROOT_TABLE_ON_HEAP))
delete m_pCerNgenRootTable;
+#endif
if (HasNativeImage())
{
@@ -3154,6 +3158,7 @@ BOOL Module::IsPreV4Assembly()
return !!(m_dwPersistedFlags & IS_PRE_V4_ASSEMBLY);
}
+#ifdef FEATURE_CER
DWORD Module::GetReliabilityContract()
{
CONTRACTL
@@ -3180,6 +3185,7 @@ DWORD Module::GetReliabilityContract()
return m_dwReliabilityContract;
}
+#endif // FEATURE_CER
ArrayDPTR(FixupPointer<PTR_MethodTable>) ModuleCtorInfo::GetGCStaticMTs(DWORD index)
{
@@ -3427,8 +3433,8 @@ void Module::EnumRegularStaticGCRefs(AppDomain* pAppDomain, promote_func* fn, Sc
}
CONTRACT_END;
- _ASSERTE(GCHeap::IsGCInProgress() &&
- GCHeap::IsServerHeap() &&
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
+ GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
@@ -6268,7 +6274,7 @@ Module *Module::GetModuleIfLoaded(mdFile kFile, BOOL onlyLoadedInAppDomain, BOOL
#ifndef DACCESS_COMPILE
#if defined(FEATURE_MULTIMODULE_ASSEMBLIES)
// check if actually loaded, unless happens during GC (GC works only with loaded assemblies)
- if (!GCHeap::IsGCInProgress() && onlyLoadedInAppDomain && pModule && !pModule->IsManifest())
+ if (!GCHeapUtilities::IsGCInProgress() && onlyLoadedInAppDomain && pModule && !pModule->IsManifest())
{
DomainModule *pDomainModule = pModule->FindDomainModule(GetAppDomain());
if (pDomainModule == NULL || !pDomainModule->IsLoaded())
@@ -9860,10 +9866,12 @@ void Module::PrepareTypesForSave(DataImage *image)
PrepareRemotableMethodInfo(pMT);
#endif // FEATURE_REMOTING
+#ifdef FEATURE_CER
// If this module defines any CriticalFinalizerObject derived classes,
// then we'll prepare these types for Constrained Execution Regions (CER) now.
// (Normally they're prepared at object instantiation time, a little too late for ngen).
PrepareCriticalType(pMT);
+#endif // FEATURE_CER
}
}
@@ -9947,7 +9955,9 @@ void Module::Save(DataImage *image)
// Cache values of all persisted flags computed from custom attributes
IsNoStringInterning();
IsRuntimeWrapExceptions();
+#ifdef FEATURE_CER
GetReliabilityContract();
+#endif
IsPreV4Assembly();
HasDefaultDllImportSearchPathsAttribute();
@@ -10302,10 +10312,12 @@ void Module::Save(DataImage *image)
m_nPropertyNameSet * sizeof(BYTE),
DataImage::ITEM_PROPERTY_NAME_SET);
+#ifdef FEATURE_CER
// Save Constrained Execution Region (CER) fixup information (used to eagerly fixup trees of methods to avoid any runtime
// induced failures when invoking the tree).
if (m_pCerNgenRootTable != NULL)
m_pCerNgenRootTable->Save(image, profileData);
+#endif
// Sort the list of RVA statics in an ascending order wrt the RVA
// and save them.
@@ -10761,6 +10773,7 @@ void Module::PlaceMethod(DataImage *image, MethodDesc *pMD, DWORD profilingFlags
image->PlaceStructureForAddress(pMD, CORCOMPILE_SECTION_WRITE);
}
+#ifdef FEATURE_CER
if (profilingFlags & (1 << ReadCerMethodList))
{
// protect against stale IBC data
@@ -10771,6 +10784,7 @@ void Module::PlaceMethod(DataImage *image, MethodDesc *pMD, DWORD profilingFlags
image->PlaceStructureForAddress(m_pCerNgenRootTable->GetList(pMD), CORCOMPILE_SECTION_HOT);
}
}
+#endif // FEATURE_CER
if (profilingFlags & (1 << WriteMethodPrecode))
{
@@ -11314,6 +11328,7 @@ void Module::Fixup(DataImage *image)
image->ZeroField(m_FileReferencesMap.pTable, 0,
m_FileReferencesMap.GetSize() * sizeof(void*));
+#ifdef FEATURE_CER
//
// Fixup Constrained Execution Regions restoration records.
//
@@ -11330,6 +11345,7 @@ void Module::Fixup(DataImage *image)
// Zero out fields we always compute at runtime lazily.
image->ZeroField(this, offsetof(Module, m_pCerPrepInfo), sizeof(m_pCerPrepInfo));
image->ZeroField(this, offsetof(Module, m_pCerCrst), sizeof(m_pCerCrst));
+#endif // FEATURE_CER
image->ZeroField(this, offsetof(Module, m_debuggerSpecificData), sizeof(m_debuggerSpecificData));
@@ -15593,7 +15609,7 @@ FieldDesc *Module::LookupFieldDef(mdFieldDef token)
#endif // DACCESS_COMPILE
-#ifndef DACCESS_COMPILE
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_CER)
// Access to CerPrepInfo, the structure used to track CERs prepared at runtime (as opposed to ngen time). GetCerPrepInfo will
// return the structure associated with the given method desc if it exists or NULL otherwise. CreateCerPrepInfo will get the
@@ -15745,7 +15761,7 @@ void Module::RestoreCer(MethodDesc *pMD)
#endif // FEATURE_PREJIT
-#endif // !DACCESS_COMPILE
+#endif // !DACCESS_COMPILE && FEATURE_CER
@@ -15911,9 +15927,9 @@ void Module::ExpandAll()
pMD->GetMDImport(),
&ignored));
#ifdef FEATURE_INTERPRETER
- pMD->MakeJitWorker(pHeader, CORJIT_FLG_MAKEFINALCODE, 0);
+ pMD->MakeJitWorker(pHeader, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE));
#else
- pMD->MakeJitWorker(pHeader, 0, 0);
+ pMD->MakeJitWorker(pHeader, CORJIT_FLAGS());
#endif
}
}
diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
index ce2e76e277..d15bd6b3d1 100644
--- a/src/vm/ceeload.h
+++ b/src/vm/ceeload.h
@@ -96,14 +96,16 @@ class PersistentInlineTrackingMap;
// The native symbol reader dll name
#ifdef FEATURE_CORECLR
-#if defined(_TARGET_AMD64_)
+#if defined(_AMD64_)
#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.amd64.dll")
-#elif defined(_TARGET_X86_)
+#elif defined(_X86_)
#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.x86.dll")
-#elif defined(_TARGET_ARM_)
+#elif defined(_ARM_)
#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm.dll")
-#elif defined(_TARGET_ARM64_)
-#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm64.dll")
+#elif defined(_ARM64_)
+// Use diasymreader until the package has an arm64 version - issue #7360
+//#define NATIVE_SYMBOL_READER_DLL W("Microsoft.DiaSymReader.Native.arm64.dll")
+#define NATIVE_SYMBOL_READER_DLL W("diasymreader.dll")
#endif
#else
#define NATIVE_SYMBOL_READER_DLL W("diasymreader.dll")
@@ -3392,10 +3394,12 @@ public:
//-----------------------------------------------------------------------------------------
BOOL IsPreV4Assembly();
+#ifdef FEATURE_CER
//-----------------------------------------------------------------------------------------
// Get reliability contract info, see ConstrainedExecutionRegion.cpp for details.
//-----------------------------------------------------------------------------------------
DWORD GetReliabilityContract();
+#endif
//-----------------------------------------------------------------------------------------
// Parse/Return NeutralResourcesLanguageAttribute if it exists (updates Module member variables at ngen time)
@@ -3404,13 +3408,15 @@ public:
protected:
+#ifdef FEATURE_CER
Volatile<DWORD> m_dwReliabilityContract;
+#endif
// initialize Crst controlling the Dynamic IL hashtables
void InitializeDynamicILCrst();
-#ifndef DACCESS_COMPILE
public:
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_CER)
// Support for getting and creating information about Constrained Execution Regions rooted in this module.
@@ -3441,7 +3447,7 @@ public:
LIMITED_METHOD_CONTRACT;
return m_pCerCrst;
}
-#endif // !DACCESS_COMPILE
+#endif // !DACCESS_COMPILE && FEATURE_CER
#ifdef FEATURE_CORECLR
void VerifyAllMethods();
@@ -3454,11 +3460,13 @@ public:
}
private:
+#ifdef FEATURE_CER
EEPtrHashTable *m_pCerPrepInfo; // Root methods prepared for Constrained Execution Regions
Crst *m_pCerCrst; // Mutex protecting update access to both of the above hashes
#ifdef FEATURE_PREJIT
CerNgenRootTable *m_pCerNgenRootTable; // Root methods of CERs found during ngen and requiring runtime restoration
#endif
+#endif
// This struct stores the data used by the managed debugging infrastructure. If it turns out that
// the debugger is increasing the size of the Module class by too much, we can consider allocating
diff --git a/src/vm/ceemain.cpp b/src/vm/ceemain.cpp
index 07781261f7..5521d8a4d9 100644
--- a/src/vm/ceemain.cpp
+++ b/src/vm/ceemain.cpp
@@ -150,7 +150,7 @@
#include "frames.h"
#include "threads.h"
#include "stackwalk.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "interoputil.h"
#include "security.h"
#include "fieldmarshaler.h"
@@ -195,6 +195,7 @@
#include "finalizerthread.h"
#include "threadsuspend.h"
#include "disassembler.h"
+#include "gcenv.ee.h"
#ifndef FEATURE_PAL
#include "dwreport.h"
@@ -305,7 +306,6 @@ extern "C" HRESULT __cdecl CorDBGetInterface(DebugInterface** rcInterface);
#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
-void* __stdcall GetCLRFunction(LPCSTR FunctionName);
// Pointer to the activated CLR interface provided by the shim.
ICLRRuntimeInfo *g_pCLRRuntime = NULL;
@@ -640,7 +640,7 @@ void InitializeStartupFlags()
g_fEnableARM = TRUE;
#endif // !FEATURE_CORECLR
- GCHeap::InitializeHeapType((flags & STARTUP_SERVER_GC) != 0);
+ InitializeHeapType((flags & STARTUP_SERVER_GC) != 0);
#ifdef FEATURE_LOADER_OPTIMIZATION
g_dwGlobalSharePolicy = (flags&STARTUP_LOADER_OPTIMIZATION_MASK)>>1;
@@ -1932,15 +1932,17 @@ void STDMETHODCALLTYPE EEShutDownHelper(BOOL fIsDllUnloading)
#endif
#ifdef FEATURE_PREJIT
- // If we're doing basic block profiling, we need to write the log files to disk.
-
- static BOOL fIBCLoggingDone = FALSE;
- if (!fIBCLoggingDone)
{
- if (g_IBCLogger.InstrEnabled())
- Module::WriteAllModuleProfileData(true);
+ // If we're doing basic block profiling, we need to write the log files to disk.
+
+ static BOOL fIBCLoggingDone = FALSE;
+ if (!fIBCLoggingDone)
+ {
+ if (g_IBCLogger.InstrEnabled())
+ Module::WriteAllModuleProfileData(true);
- fIBCLoggingDone = TRUE;
+ fIBCLoggingDone = TRUE;
+ }
}
#endif // FEATURE_PREJIT
@@ -3719,7 +3721,16 @@ void InitializeGarbageCollector()
g_pFreeObjectMethodTable->SetBaseSize(ObjSizeOf (ArrayBase));
g_pFreeObjectMethodTable->SetComponentSize(1);
- GCHeap *pGCHeap = GCHeap::CreateGCHeap();
+#ifdef FEATURE_STANDALONE_GC
+ IGCToCLR* gcToClr = new (nothrow) GCToEEInterface();
+ if (!gcToClr)
+ ThrowOutOfMemory();
+#else
+ IGCToCLR* gcToClr = nullptr;
+#endif
+
+ IGCHeap *pGCHeap = InitializeGarbageCollector(gcToClr);
+ g_pGCHeap = pGCHeap;
if (!pGCHeap)
ThrowOutOfMemory();
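
The hunk above is the standalone-GC seam: the EE hands its callback interface to the GC and receives the heap interface back. A condensed sketch of the handshake as the diff wires it (the g_pGCHeap caching motive is an assumption, inferred from the GCHeapUtilities::GetGCHeap() calls used throughout this commit):

// Sketch of the EE<->GC initialization handshake introduced above.
IGCToCLR* gcToClr = nullptr;                 // in-process GC needs no callback object
#ifdef FEATURE_STANDALONE_GC
gcToClr = new (nothrow) GCToEEInterface();   // EE-side callbacks handed to the GC
#endif
IGCHeap* pGCHeap = InitializeGarbageCollector(gcToClr);  // GC hands back its interface
g_pGCHeap = pGCHeap;                         // cached for GCHeapUtilities::GetGCHeap()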
@@ -3833,7 +3844,7 @@ BOOL STDMETHODCALLTYPE EEDllMain( // TRUE on success, FALSE on error.
{
// GetThread() may be set to NULL for Win9x during shutdown.
Thread *pThread = GetThread();
- if (GCHeap::IsGCInProgress() &&
+ if (GCHeapUtilities::IsGCInProgress() &&
( (pThread && (pThread != ThreadSuspend::GetSuspensionThread() ))
|| !g_fSuspendOnShutdown))
{
@@ -4643,7 +4654,6 @@ VOID STDMETHODCALLTYPE LogHelp_LogAssert( LPCSTR szFile, int iLine, LPCSTR expr)
}
-extern BOOL NoGuiOnAssert();
extern "C"
//__declspec(dllexport)
BOOL STDMETHODCALLTYPE LogHelp_NoGuiOnAssert()
@@ -4656,7 +4666,6 @@ BOOL STDMETHODCALLTYPE LogHelp_NoGuiOnAssert()
return fRet;
}
-extern VOID TerminateOnAssert();
extern "C"
//__declspec(dllexport)
VOID STDMETHODCALLTYPE LogHelp_TerminateOnAssert()
diff --git a/src/vm/cgensys.h b/src/vm/cgensys.h
index 205d8a223e..4dd1ee4b4b 100644
--- a/src/vm/cgensys.h
+++ b/src/vm/cgensys.h
@@ -103,21 +103,22 @@ inline void GetSpecificCpuInfo(CORINFO_CPU * cpuInfo)
#endif // !_TARGET_X86_
-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && !defined(CROSSGEN_COMPILE)
extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]);
-#endif // defined(_TARGET_AMD64_)
+extern "C" DWORD __stdcall xmmYmmStateSupport();
+#endif
inline bool TargetHasAVXSupport()
{
-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+#if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && !defined(CROSSGEN_COMPILE)
unsigned char buffer[16];
- // All AMD64 targets support cpuid.
+ // All x86/AMD64 targets support cpuid.
(void) getcpuid(1, buffer);
// getcpuid executes cpuid with eax set to its first argument, and ecx cleared.
// It returns the resulting eax, ebx, ecx and edx (in that order) in buffer[].
// The AVX feature is ECX bit 28.
return ((buffer[11] & 0x10) != 0);
-#endif // defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
+#endif // (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && !defined(CROSSGEN_COMPILE)
return false;
}
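
The byte indexing in TargetHasAVXSupport follows from getcpuid packing EAX, EBX, ECX, EDX into buffer[0..15]: ECX occupies buffer[8..11], so ECX bit 28 is bit 4 of buffer[11] (mask 0x10). A small sketch of that decoding, assuming only the packing convention stated in the comment above (EcxBitSet is a hypothetical helper, not code from this file):

// Hypothetical helper showing why (buffer[11] & 0x10) tests ECX bit 28 (AVX).
// getcpuid packing: EAX = buffer[0..3], EBX = buffer[4..7],
//                   ECX = buffer[8..11], EDX = buffer[12..15]
static bool EcxBitSet(const unsigned char buffer[16], int bit)
{
    const unsigned char* ecx = buffer + 8;            // ECX starts at byte 8
    return (ecx[bit / 8] & (1 << (bit % 8))) != 0;    // bit 28 -> ecx[3] & 0x10
}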
diff --git a/src/vm/class.cpp b/src/vm/class.cpp
index 961df2a104..cff71f328f 100644
--- a/src/vm/class.cpp
+++ b/src/vm/class.cpp
@@ -19,6 +19,7 @@
#include "constrainedexecutionregion.h"
#include "customattribute.h"
#include "encee.h"
+#include "typestring.h"
#ifdef FEATURE_COMINTEROP
#include "comcallablewrapper.h"
@@ -2490,12 +2491,14 @@ MethodTable::GetSubstitutionForParent(
#endif //!DACCESS_COMPILE
+#ifdef FEATURE_CER
//*******************************************************************************
DWORD EEClass::GetReliabilityContract()
{
LIMITED_METHOD_CONTRACT;
return HasOptionalFields() ? GetOptionalFields()->m_dwReliabilityContract : RC_NULL;
}
+#endif // FEATURE_CER
//*******************************************************************************
#ifdef FEATURE_PREJIT
@@ -3065,6 +3068,7 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pUMThunkMarshInfo));
image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pStaticCallStub));
image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pMultiCastInvokeStub));
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pSecureDelegateInvokeStub));
image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pMarshalStub));
#ifdef FEATURE_COMINTEROP
diff --git a/src/vm/class.h b/src/vm/class.h
index f19b9818ec..7517863278 100644
--- a/src/vm/class.h
+++ b/src/vm/class.h
@@ -703,7 +703,9 @@ class EEClassOptionalFields
#define MODULE_NON_DYNAMIC_STATICS ((DWORD)-1)
DWORD m_cbModuleDynamicID;
+#ifdef FEATURE_CER
DWORD m_dwReliabilityContract;
+#endif
SecurityProperties m_SecProps;
@@ -1768,12 +1770,14 @@ public:
// Cached class level reliability contract info, see ConstrainedExecutionRegion.cpp for details.
DWORD GetReliabilityContract();
+#ifdef FEATURE_CER
inline void SetReliabilityContract(DWORD dwValue)
{
LIMITED_METHOD_CONTRACT;
_ASSERTE(HasOptionalFields());
GetOptionalFields()->m_dwReliabilityContract = dwValue;
}
+#endif
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
// Get number of eightbytes used by a struct passed in registers.
@@ -2412,6 +2416,7 @@ public:
PTR_Stub m_pInstRetBuffCallStub;
PTR_MethodDesc m_pInvokeMethod;
PTR_Stub m_pMultiCastInvokeStub;
+ PTR_Stub m_pSecureDelegateInvokeStub;
UMThunkMarshInfo* m_pUMThunkMarshInfo;
PTR_MethodDesc m_pBeginInvokeMethod;
PTR_MethodDesc m_pEndInvokeMethod;
diff --git a/src/vm/class.inl b/src/vm/class.inl
index 9362a8328f..7d5c74d586 100644
--- a/src/vm/class.inl
+++ b/src/vm/class.inl
@@ -50,7 +50,9 @@ inline void EEClassOptionalFields::Init()
m_WinRTRedirectedTypeIndex = WinMDAdapter::RedirectedTypeIndex_Invalid;
#endif // FEATURE_COMINTEROP
m_cbModuleDynamicID = MODULE_NON_DYNAMIC_STATICS;
+#ifdef FEATURE_CER
m_dwReliabilityContract = RC_NULL;
+#endif
m_SecProps = 0;
#if defined(UNIX_AMD64_ABI) && defined(FEATURE_UNIX_AMD64_STRUCT_PASSING)
m_numberEightBytes = 0;
diff --git a/src/vm/classcompat.cpp b/src/vm/classcompat.cpp
index ac819941f9..50c56506a9 100644
--- a/src/vm/classcompat.cpp
+++ b/src/vm/classcompat.cpp
@@ -31,7 +31,7 @@
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
diff --git a/src/vm/classnames.h b/src/vm/classnames.h
index 949c6c4447..47f1fecdec 100644
--- a/src/vm/classnames.h
+++ b/src/vm/classnames.h
@@ -16,6 +16,9 @@
#define g_ArrayClassName "System.Array"
#define g_NullableName "Nullable`1"
+#ifdef FEATURE_SPAN_OF_T
+#define g_ByReferenceName "ByReference`1"
+#endif
#define g_CollectionsEnumerableItfName "System.Collections.IEnumerable"
#define g_CollectionsEnumeratorClassName "System.Collections.IEnumerator"
diff --git a/src/vm/clrprivbinderwinrt.cpp b/src/vm/clrprivbinderwinrt.cpp
index b82d46cdab..b4fb45c083 100644
--- a/src/vm/clrprivbinderwinrt.cpp
+++ b/src/vm/clrprivbinderwinrt.cpp
@@ -287,7 +287,8 @@ HRESULT CLRPrivBinderWinRT::BindWinRTAssemblyByName(
{
STANDARD_VM_CONTRACT;
HRESULT hr = S_OK;
- ReleaseHolder<CLRPrivAssemblyWinRT> pAssembly;
+ ReleaseHolder<CLRPrivAssemblyWinRT> pAssembly;
+ LPWSTR wszFullTypeName = nullptr;
#ifndef FEATURE_CORECLR
NewArrayHolder<WCHAR> wszAssemblySimpleName;
#endif
@@ -319,11 +320,13 @@ HRESULT CLRPrivBinderWinRT::BindWinRTAssemblyByName(
IfFailGo(fusion::util::GetProperty(pAssemblyName, ASM_NAME_NAME, &wszAssemblySimpleName));
#else
WCHAR wszAssemblySimpleName[_MAX_PATH];
- DWORD cchAssemblySimpleName = _MAX_PATH;
- IfFailGo(pAssemblyName->GetName(&cchAssemblySimpleName, wszAssemblySimpleName));
+ {
+ DWORD cchAssemblySimpleName = _MAX_PATH;
+ IfFailGo(pAssemblyName->GetName(&cchAssemblySimpleName, wszAssemblySimpleName));
+ }
#endif
- LPWSTR wszFullTypeName = wcschr(wszAssemblySimpleName, W('!'));
+ wszFullTypeName = wcschr(wszAssemblySimpleName, W('!'));
if (wszFullTypeName != nullptr)
{
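
The reshuffling above (hoisting wszFullTypeName, scoping cchAssemblySimpleName inside a block) is driven by a C++ rule: IfFailGo expands to a goto, and a goto may not jump past the initialization of a local into its scope. A sketch of the rule with hypothetical names (Example, SomeCall, OtherCall are illustrations, not code from this file):

// Sketch: why the locals moved. IfFailGo(x) ~ if (FAILED(hr = (x))) goto ErrExit;
HRESULT Example()
{
    HRESULT hr = S_OK;
    LPWSTR pName = nullptr;           // OK: initialized before the first goto
    IfFailGo(SomeCall());
    {
        DWORD cch = 10;               // OK: the block ends before ErrExit, so the
        IfFailGo(OtherCall(&cch));    // goto never jumps past this initialization
    }
ErrExit:
    return hr;
}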
diff --git a/src/vm/clsload.hpp b/src/vm/clsload.hpp
index e2705ae2e4..a3a0de3cf4 100644
--- a/src/vm/clsload.hpp
+++ b/src/vm/clsload.hpp
@@ -697,7 +697,7 @@ public:
// fLoadTypes=DontLoadTypes: if type isn't already in the loader's table, return NULL
// fLoadTypes=LoadTypes: if type isn't already in the loader's table, then create it
// Each comes in two variants, LoadXThrowing and LoadXNoThrow, the latter being just
- // a exception-handling wrapper around the former.
+ // an exception-handling wrapper around the former.
//
// Each also allows types to be loaded only up to a particular level (see classloadlevel.h).
// The class loader itself makes use of these levels to "break" recursion across
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index 89084dbe85..98f0e53d3d 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -31,6 +31,10 @@
#include "debuginfostore.h"
#include "strsafe.h"
+#ifdef FEATURE_CORECLR
+#include "configuration.h"
+#endif
+
#ifdef _WIN64
#define CHECK_DUPLICATED_STRUCT_LAYOUTS
#include "../debug/daccess/fntableaccess.h"
@@ -1183,6 +1187,7 @@ EEJitManager::EEJitManager()
// CRST_TAKEN_DURING_SHUTDOWN - We take this lock during shutdown if ETW is on (to do rundown)
m_CodeHeapCritSec( CrstSingleUseLock,
CrstFlags(CRST_UNSAFE_ANYMODE|CRST_DEBUGGER_THREAD|CRST_TAKEN_DURING_SHUTDOWN)),
+ m_CPUCompileFlags(),
m_EHClauseCritSec( CrstSingleUseLock )
{
CONTRACTL {
@@ -1196,41 +1201,34 @@ EEJitManager::EEJitManager()
#ifdef _TARGET_AMD64_
m_pEmergencyJumpStubReserveList = NULL;
#endif
-#ifdef _TARGET_AMD64_
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
m_JITCompilerOther = NULL;
#endif
+ m_fLegacyJitUsed = FALSE;
+
#ifdef ALLOW_SXS_JIT
m_alternateJit = NULL;
m_AltJITCompiler = NULL;
m_AltJITRequired = false;
#endif
- m_dwCPUCompileFlags = 0;
-
m_cleanupList = NULL;
}
-#if defined(_TARGET_AMD64_)
-extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16]);
-extern "C" DWORD __stdcall xmmYmmStateSupport();
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
bool DoesOSSupportAVX()
{
+ LIMITED_METHOD_CONTRACT;
+
#ifndef FEATURE_PAL
// On Windows we have an api(GetEnabledXStateFeatures) to check if AVX is supported
typedef DWORD64 (WINAPI *PGETENABLEDXSTATEFEATURES)();
PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL;
- // Probe ApiSet first
- HMODULE hMod = WszLoadLibraryEx(W("api-ms-win-core-xstate-l2-1-0.dll"), NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
-
- if (hMod == NULL)
- {
- // On older OS's where apiset is not present probe kernel32
- hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
- if(hMod = NULL)
- return FALSE;
- }
+ HMODULE hMod = WszLoadLibraryEx(WINDOWS_KERNEL32_DLLNAME_W, NULL, LOAD_LIBRARY_SEARCH_SYSTEM32);
+      if (hMod == NULL)
+          return FALSE;
pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hMod, "GetEnabledXStateFeatures");
@@ -1249,7 +1247,7 @@ bool DoesOSSupportAVX()
return TRUE;
}
-#endif // defined(_TARGET_AMD64_)
+#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
void EEJitManager::SetCpuInfo()
{
@@ -1259,7 +1257,7 @@ void EEJitManager::SetCpuInfo()
// NOTE: This function needs to be kept in sync with Zapper::CompileAssembly()
//
- DWORD dwCPUCompileFlags = 0;
+ CORJIT_FLAGS CPUCompileFlags;
#if defined(_TARGET_X86_)
// NOTE: if you're adding any flags here, you probably should also be doing it
@@ -1270,7 +1268,7 @@ void EEJitManager::SetCpuInfo()
switch (CPU_X86_FAMILY(cpuInfo.dwCPUType))
{
case CPU_X86_PENTIUM_4:
- dwCPUCompileFlags |= CORJIT_FLG_TARGET_P4;
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_TARGET_P4);
break;
default:
break;
@@ -1278,15 +1276,17 @@ void EEJitManager::SetCpuInfo()
if (CPU_X86_USE_CMOV(cpuInfo.dwFeatures))
{
- dwCPUCompileFlags |= CORJIT_FLG_USE_CMOV |
- CORJIT_FLG_USE_FCOMI;
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_CMOV);
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_FCOMI);
}
if (CPU_X86_USE_SSE2(cpuInfo.dwFeatures))
{
- dwCPUCompileFlags |= CORJIT_FLG_USE_SSE2;
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE2);
}
-#elif defined(_TARGET_AMD64_)
+#endif // _TARGET_X86_
+
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
unsigned char buffer[16];
DWORD maxCpuId = getcpuid(0, buffer);
if (maxCpuId >= 0)
@@ -1295,17 +1295,17 @@ void EEJitManager::SetCpuInfo()
// It returns the resulting eax in buffer[0-3], ebx in buffer[4-7], ecx in buffer[8-11],
// and edx in buffer[12-15].
// We will set the following flags:
- // CORJIT_FLG_USE_SSE3_4 if the following feature bits are set (input EAX of 1)
+ // CORJIT_FLAG_USE_SSE3_4 if the following feature bits are set (input EAX of 1)
// SSE3 - ECX bit 0 (buffer[8] & 0x01)
// SSSE3 - ECX bit 9 (buffer[9] & 0x02)
// SSE4.1 - ECX bit 19 (buffer[10] & 0x08)
// SSE4.2 - ECX bit 20 (buffer[10] & 0x10)
- // CORJIT_FLG_USE_AVX if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1:
+ // CORJIT_FLAG_USE_AVX if the following feature bits are set (input EAX of 1), and xmmYmmStateSupport returns 1:
// OSXSAVE - ECX bit 27 (buffer[11] & 0x08)
// AVX - ECX bit 28 (buffer[11] & 0x10)
- // CORJIT_FLG_USE_AVX2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
+ // CORJIT_FLAG_USE_AVX2 if the following feature bit is set (input EAX of 0x07 and input ECX of 0):
// AVX2 - EBX bit 5 (buffer[4] & 0x20)
- // CORJIT_FLG_USE_AVX_512 is not currently set, but defined so that it can be used in future without
+ // CORJIT_FLAG_USE_AVX_512 is not currently set, but defined so that it can be used in future without
// synchronously updating VM and JIT.
(void) getcpuid(1, buffer);
// If SSE2 is not enabled, there is no point in checking the rest.
@@ -1318,7 +1318,7 @@ void EEJitManager::SetCpuInfo()
((buffer[10] & 0x08) != 0) && // SSE4.1
((buffer[10] & 0x10) != 0)) // SSE4.2
{
- dwCPUCompileFlags |= CORJIT_FLG_USE_SSE3_4;
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_SSE3_4);
}
if ((buffer[11] & 0x18) == 0x18)
{
@@ -1326,13 +1326,13 @@ void EEJitManager::SetCpuInfo()
{
if (xmmYmmStateSupport() == 1)
{
- dwCPUCompileFlags |= CORJIT_FLG_USE_AVX;
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX);
if (maxCpuId >= 0x07)
{
(void) getcpuid(0x07, buffer);
if ((buffer[4] & 0x20) != 0)
{
- dwCPUCompileFlags |= CORJIT_FLG_USE_AVX2;
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_USE_AVX2);
}
}
}
@@ -1341,13 +1341,13 @@ void EEJitManager::SetCpuInfo()
static ConfigDWORD fFeatureSIMD;
if (fFeatureSIMD.val(CLRConfig::EXTERNAL_FeatureSIMD) != 0)
{
- dwCPUCompileFlags |= CORJIT_FLG_FEATURE_SIMD;
+ CPUCompileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD);
}
}
}
-#endif // defined(_TARGET_AMD64_)
+#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
- m_dwCPUCompileFlags = dwCPUCompileFlags;
+ m_CPUCompileFlags = CPUCompileFlags;
}
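
The SSE3_4 test above folds four CPUID feature bits into one JIT flag; spelled out as a predicate under the same buffer packing the comment describes (a sketch, not code from the diff):

// Sketch: the composite CORJIT_FLAG_USE_SSE3_4 condition from the hunk above.
static bool SupportsSse3Through42(const unsigned char buffer[16])
{
    return ((buffer[8]  & 0x01) != 0)     // SSE3   - ECX bit 0
        && ((buffer[9]  & 0x02) != 0)     // SSSE3  - ECX bit 9
        && ((buffer[10] & 0x08) != 0)     // SSE4.1 - ECX bit 19
        && ((buffer[10] & 0x10) != 0);    // SSE4.2 - ECX bit 20
}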
// Define some data that we can use to get a better idea of what happened when we get a Watson dump that indicates the JIT failed to load.
@@ -1356,7 +1356,7 @@ void EEJitManager::SetCpuInfo()
enum JIT_LOAD_JIT_ID
{
JIT_LOAD_MAIN = 500, // The "main" JIT. Normally, this is named "clrjit.dll". Start at a number that is somewhat uncommon (i.e., not zero or 1) to help distinguish from garbage, in process dumps.
- JIT_LOAD_LEGACY, // The "legacy" JIT. Normally, this is named "compatjit.dll" (aka, JIT64). This only applies to AMD64.
+    JIT_LOAD_LEGACY,    // The "legacy" JIT. Normally, this is named "compatjit.dll". This applies to AMD64 on Windows desktop, or x86 on .NET Core on Windows.
JIT_LOAD_ALTJIT // An "altjit". By default, named "protojit.dll". Used both internally, as well as externally for JIT CTP builds.
};
@@ -1432,33 +1432,43 @@ static void LoadAndInitializeJIT(LPCWSTR pwzJitName, OUT HINSTANCE* phJit, OUT I
#ifdef FEATURE_CORECLR
PathString CoreClrFolderHolder;
extern HINSTANCE g_hThisInst;
+ bool havePath = false;
#if !defined(FEATURE_MERGE_JIT_AND_ENGINE)
if (g_CLRJITPath != nullptr)
{
- // If we have been asked to load a specific JIT binary, load it.
+ // If we have been asked to load a specific JIT binary, load from that path.
+ // The main JIT load will use exactly that name because pwzJitName will have
+ // been computed as the last component of g_CLRJITPath by ExecutionManager::GetJitName().
+ // Non-primary JIT names (such as compatjit or altjit) will be loaded from the
+ // same directory.
+ // (Ideally, g_CLRJITPath would just be the JIT path without the filename component,
+ // but that's not how the JIT_PATH variable was originally defined.)
CoreClrFolderHolder.Set(g_CLRJITPath);
+ havePath = true;
}
else
#endif // !defined(FEATURE_MERGE_JIT_AND_ENGINE)
if (WszGetModuleFileName(g_hThisInst, CoreClrFolderHolder))
{
// Load JIT from next to CoreCLR binary
+ havePath = true;
+ }
+
+ if (havePath && !CoreClrFolderHolder.IsEmpty())
+ {
SString::Iterator iter = CoreClrFolderHolder.End();
BOOL findSep = CoreClrFolderHolder.FindBack(iter, DIRECTORY_SEPARATOR_CHAR_W);
if (findSep)
{
SString sJitName(pwzJitName);
CoreClrFolderHolder.Replace(iter + 1, CoreClrFolderHolder.End() - (iter + 1), sJitName);
- }
- }
- if (!CoreClrFolderHolder.IsEmpty())
- {
- *phJit = CLRLoadLibrary(CoreClrFolderHolder.GetUnicode());
- if (*phJit != NULL)
- {
- hr = S_OK;
+ *phJit = CLRLoadLibrary(CoreClrFolderHolder.GetUnicode());
+ if (*phJit != NULL)
+ {
+ hr = S_OK;
+ }
}
}
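
The restructured block above merges two sources of the JIT directory into one load path. Its control flow, condensed (folder stands in for CoreClrFolderHolder, and ReplaceFileNameComponent abbreviates the iterator/Replace dance; both names are hypothetical):

// Condensed control flow of the path selection in LoadAndInitializeJIT above.
bool havePath = false;
if (g_CLRJITPath != nullptr)                         // explicit JIT path configured
{
    folder.Set(g_CLRJITPath);
    havePath = true;
}
else if (WszGetModuleFileName(g_hThisInst, folder))  // else: next to coreclr itself
{
    havePath = true;
}
if (havePath && !folder.IsEmpty())
{
    ReplaceFileNameComponent(folder, pwzJitName);    // keep the dir, swap the file name
    *phJit = CLRLoadLibrary(folder.GetUnicode());
}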
@@ -1614,7 +1624,7 @@ BOOL EEJitManager::LoadJIT()
#else // !FEATURE_MERGE_JIT_AND_ENGINE
m_JITCompiler = NULL;
-#ifdef _TARGET_AMD64_
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
m_JITCompilerOther = NULL;
#endif
@@ -1623,8 +1633,8 @@ BOOL EEJitManager::LoadJIT()
// Set as a courtesy to code:CorCompileGetRuntimeDll
s_ngenCompilerDll = m_JITCompiler;
-
-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+
+#if (defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)) || (defined(_TARGET_X86_) && defined(FEATURE_CORECLR))
// If COMPlus_UseLegacyJit=1, then we fall back to compatjit.dll.
//
// This fallback mechanism was introduced for Visual Studio "14" Preview, when JIT64 (the legacy JIT) was replaced with
@@ -1645,8 +1655,16 @@ BOOL EEJitManager::LoadJIT()
// is set, we also must use JIT64 for all NGEN compilations as well.
//
// See the document "RyuJIT Compatibility Fallback Specification.docx" for details.
+ //
+    // For .NET Core 1.2, RyuJIT for x86 is the primary JIT (clrjit.dll) and JIT32 for x86 is the fallback, legacy JIT (compatjit.dll).
+    // Thus, the COMPlus_UseLegacyJit=1 mechanism has been enabled for x86 CoreCLR. This scenario has neither the UseRyuJIT
+    // registry key nor the AppX binder mode.
+#if defined(FEATURE_CORECLR)
+ bool fUseRyuJit = true;
+#else
bool fUseRyuJit = UseRyuJit();
+#endif
if ((!IsCompilationProcess() || !fUseRyuJit) && // Use RyuJIT for all NGEN, unless we're falling back to JIT64 for everything.
(newJitCompiler != nullptr)) // the main JIT must successfully load before we try loading the fallback JIT
@@ -1660,7 +1678,11 @@ BOOL EEJitManager::LoadJIT()
if (!fUsingCompatJit)
{
+#if defined(FEATURE_CORECLR)
+ DWORD useLegacyJit = Configuration::GetKnobBooleanValue(W("System.JIT.UseWindowsX86CoreLegacyJit"), CLRConfig::EXTERNAL_UseWindowsX86CoreLegacyJit);
+#else
DWORD useLegacyJit = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_UseLegacyJit); // uncached access, since this code is run no more than one time
+#endif
if (useLegacyJit == 1)
{
fUsingCompatJit = TRUE;
@@ -1689,7 +1711,7 @@ BOOL EEJitManager::LoadJIT()
{
// Now, load the compat jit and initialize it.
- LPWSTR pwzJitName = MAKEDLLNAME_W(L"compatjit");
+ LPCWSTR pwzJitName = MAKEDLLNAME_W(W("compatjit"));
// Note: if the compatjit fails to load, we ignore it, and continue to use the main JIT for
// everything. You can imagine a policy where if the user requests the compatjit, and we fail
@@ -1702,10 +1724,13 @@ BOOL EEJitManager::LoadJIT()
// Tell the main JIT to fall back to the "fallback" JIT compiler, in case some
// obfuscator tries to directly call the main JIT's getJit() function.
newJitCompiler->setRealJit(fallbackICorJitCompiler);
+
+ // Now, the compat JIT will be used.
+ m_fLegacyJitUsed = TRUE;
}
}
}
-#endif // defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+#endif // (defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)) || (defined(_TARGET_X86_) && defined(FEATURE_CORECLR))
#endif // !FEATURE_MERGE_JIT_AND_ENGINE
@@ -3397,7 +3422,7 @@ void ExecutionManager::CleanupCodeHeaps()
}
CONTRACTL_END;
- _ASSERTE (g_fProcessDetach || (GCHeap::IsGCInProgress() && ::IsGCThread()));
+ _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));
GetEEJitManager()->CleanupCodeHeaps();
}
@@ -3411,7 +3436,17 @@ void EEJitManager::CleanupCodeHeaps()
}
CONTRACTL_END;
- _ASSERTE (g_fProcessDetach || (GCHeap::IsGCInProgress() && ::IsGCThread()));
+ _ASSERTE (g_fProcessDetach || (GCHeapUtilities::IsGCInProgress() && ::IsGCThread()));
+
+    // Quick out: don't even take the lock if we have no cleanup to do.
+    // This is important because ETW takes the CodeHeapLock when it is doing
+    // rundown, and if there are many JIT compiled methods, this can take a while.
+    // Because cleanup is called synchronously before a GC, this means GCs get
+    // blocked while ETW is doing rundown. By not taking the lock we avoid
+    // this stall most of the time: cleanup is rare and ETW rundown is rare,
+    // so the likelihood of both happening together is very low.
+ if (m_cleanupList == NULL)
+ return;
CrstHolder ch(&m_CodeHeapCritSec);
@@ -4359,7 +4394,22 @@ LPCWSTR ExecutionManager::GetJitName()
LPCWSTR pwzJitName = NULL;
-#if !defined(FEATURE_CORECLR)
+#if defined(FEATURE_CORECLR)
+#if !defined(CROSSGEN_COMPILE)
+ if (g_CLRJITPath != nullptr)
+ {
+ const wchar_t* p = wcsrchr(g_CLRJITPath, DIRECTORY_SEPARATOR_CHAR_W);
+ if (p != nullptr)
+ {
+ pwzJitName = p + 1; // Return just the filename, not the directory name
+ }
+ else
+ {
+ pwzJitName = g_CLRJITPath;
+ }
+ }
+#endif // !defined(CROSSGEN_COMPILE)
+#else // !FEATURE_CORECLR
// Try to obtain a name for the jit library from the env. variable
IfFailThrow(CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_JitName, const_cast<LPWSTR *>(&pwzJitName)));
#endif // !FEATURE_CORECLR
@@ -4451,7 +4501,7 @@ RangeSection* ExecutionManager::GetRangeSection(TADDR addr)
// Unless we are on an MP system with many cpus
// where this sort of caching actually diminishes scaling during server GC
// due to many processors writing to a common location
- if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeap::IsServerHeap() || !GCHeap::IsGCInProgress())
+ if (g_SystemInfo.dwNumberOfProcessors < 4 || !GCHeapUtilities::IsServerHeap() || !GCHeapUtilities::IsGCInProgress())
pHead->pLastUsed = pLast;
#endif
@@ -6104,12 +6154,12 @@ __forceinline bool Nirvana_PrintMethodDescWorker(__in_ecount(iBuffer) char * szB
if (*pNamespace != 0)
{
- if(FAILED(StringCchPrintfA(szBuffer, iBuffer, "%s.%s.%s", pNamespace, pClassName, pSigString)))
+ if (_snprintf_s(szBuffer, iBuffer, _TRUNCATE, "%s.%s.%s", pNamespace, pClassName, pSigString) == -1)
return false;
}
else
{
- if(FAILED(StringCchPrintfA(szBuffer, iBuffer, "%s.%s", pClassName, pSigString)))
+ if (_snprintf_s(szBuffer, iBuffer, _TRUNCATE, "%s.%s", pClassName, pSigString) == -1)
return false;
}
diff --git a/src/vm/codeman.h b/src/vm/codeman.h
index f143dd642c..0fe261a92f 100644
--- a/src/vm/codeman.h
+++ b/src/vm/codeman.h
@@ -140,6 +140,10 @@ public:
PTR_EE_ILEXCEPTION phdrJitEHInfo;
PTR_BYTE phdrJitGCInfo;
+#if defined(FEATURE_GDBJIT)
+ VOID* pCalledMethods;
+#endif
+
PTR_MethodDesc phdrMDesc;
#ifdef WIN64EXCEPTIONS
@@ -172,6 +176,13 @@ public:
SUPPORTS_DAC;
return phdrMDesc;
}
+#if defined(FEATURE_GDBJIT)
+    VOID* GetCalledMethods()
+ {
+ SUPPORTS_DAC;
+ return pCalledMethods;
+ }
+#endif
TADDR GetCodeStartAddress()
{
SUPPORTS_DAC;
@@ -205,6 +216,12 @@ public:
{
phdrMDesc = pMD;
}
+#if defined(FEATURE_GDBJIT)
+ void SetCalledMethods(VOID* pCM)
+ {
+ pCalledMethods = pCM;
+ }
+#endif
void SetStubCodeBlockKind(StubCodeBlockKind kind)
{
phdrMDesc = (PTR_MethodDesc)kind;
@@ -248,6 +265,13 @@ public:
SUPPORTS_DAC;
return pRealCodeHeader->phdrMDesc;
}
+#if defined(FEATURE_GDBJIT)
+ VOID* GetCalledMethods()
+ {
+ SUPPORTS_DAC;
+ return pRealCodeHeader->pCalledMethods;
+ }
+#endif
TADDR GetCodeStartAddress()
{
SUPPORTS_DAC;
@@ -286,6 +310,12 @@ public:
{
pRealCodeHeader->phdrMDesc = pMD;
}
+#if defined(FEATURE_GDBJIT)
+ void SetCalledMethods(VOID* pCM)
+ {
+ pRealCodeHeader->pCalledMethods = pCM;
+ }
+#endif
void SetStubCodeBlockKind(StubCodeBlockKind kind)
{
pRealCodeHeader = (PTR_RealCodeHeader)kind;
@@ -1116,17 +1146,17 @@ public:
#endif // !DACCESS_COMPILE
private:
- DWORD m_dwCPUCompileFlags;
+ CORJIT_FLAGS m_CPUCompileFlags;
#if !defined CROSSGEN_COMPILE && !defined DACCESS_COMPILE
void SetCpuInfo();
#endif
public:
- inline DWORD GetCPUCompileFlags()
+ inline CORJIT_FLAGS GetCPUCompileFlags()
{
LIMITED_METHOD_CONTRACT;
- return m_dwCPUCompileFlags;
+ return m_CPUCompileFlags;
}
private :
@@ -1163,10 +1193,16 @@ public:
public:
ICorJitCompiler * m_jit;
HINSTANCE m_JITCompiler;
-#ifdef _TARGET_AMD64_
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
HINSTANCE m_JITCompilerOther; // Stores the handle of the legacy JIT, if one is loaded.
#endif
+ // TRUE if the legacy/compat JIT was loaded successfully and will be used.
+ // This is available in all builds so if COMPlus_RequireLegacyJit=1 is set in a test,
+ // the test will fail in any build where the legacy JIT is not loaded, even if legacy
+ // fallback is not available in that build. This prevents unexpected silent successes.
+ BOOL m_fLegacyJitUsed;
+
#ifdef ALLOW_SXS_JIT
//put these at the end so that we don't mess up the offsets in the DAC.
ICorJitCompiler * m_alternateJit;
@@ -1801,7 +1837,7 @@ public:
ULONG GetFixedStackSize()
{
WRAPPER_NO_CONTRACT;
- return GetCodeManager()->GetFrameSize(GetGCInfo());
+ return GetCodeManager()->GetFrameSize(GetGCInfoToken());
}
#endif // WIN64EXCEPTIONS
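
The GetFixedStackSize change above reflects the commit-wide move from passing raw GC-info pointers to passing a GCInfoToken. The shape assumed here pairs the blob with a format version (a sketch; the real definition lives elsewhere in the tree):

// Assumed shape of the token behind GetGCInfoToken() (sketch, defined elsewhere):
struct GCInfoToken
{
    PTR_VOID Info;      // the raw GC info blob (what GetGCInfo() used to return)
    UINT32   Version;   // lets decoders cope with GC info format revisions
};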
diff --git a/src/vm/comdelegate.cpp b/src/vm/comdelegate.cpp
index 9ba1bdb328..4c85a0216e 100644
--- a/src/vm/comdelegate.cpp
+++ b/src/vm/comdelegate.cpp
@@ -1249,7 +1249,7 @@ PCODE COMDelegate::ConvertToCallback(MethodDesc* pMD)
// Get UMEntryThunk from appdomain thunkcache cache.
UMEntryThunk *pUMEntryThunk = GetAppDomain()->GetUMEntryThunkCache()->GetUMEntryThunk(pMD);
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
// System.Runtime.InteropServices.NativeCallableAttribute
BYTE* pData = NULL;
@@ -1281,7 +1281,7 @@ PCODE COMDelegate::ConvertToCallback(MethodDesc* pMD)
pUMThunkMarshalInfo->SetCallingConvention(callConv);
}
}
-#endif //_TARGET_X86_
+#endif //_TARGET_X86_ && !FEATURE_STUBS_AS_IL
pCode = (PCODE)pUMEntryThunk->GetCode();
_ASSERTE(pCode != NULL);
@@ -2395,7 +2395,7 @@ PCODE COMDelegate::TheDelegateInvokeStub()
}
CONTRACT_END;
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
static PCODE s_pInvokeStub;
if (s_pInvokeStub == NULL)
@@ -2415,7 +2415,7 @@ PCODE COMDelegate::TheDelegateInvokeStub()
RETURN s_pInvokeStub;
#else
RETURN GetEEFuncEntryPoint(SinglecastDelegateInvokeStub);
-#endif // _TARGET_X86_
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
}
// Get the cpu stub for a delegate invoke.
@@ -2931,47 +2931,61 @@ PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
#ifdef FEATURE_CAS_POLICY
#error GetSecureInvoke not implemented
#else
- GCX_PREEMP();
+ MethodTable * pDelegateMT = pMD->GetMethodTable();
+ DelegateEEClass* delegateEEClass = (DelegateEEClass*) pDelegateMT->GetClass();
+ Stub *pStub = delegateEEClass->m_pSecureDelegateInvokeStub;
+
+ if (pStub == NULL)
+ {
+
+ GCX_PREEMP();
+
+ MetaSig sig(pMD);
+
+ BOOL fReturnVal = !sig.IsReturnTypeVoid();
- MetaSig sig(pMD);
+ SigTypeContext emptyContext;
+ ILStubLinker sl(pMD->GetModule(), pMD->GetSignature(), &emptyContext, pMD, TRUE, TRUE, FALSE);
+
+ ILCodeStream *pCode = sl.NewCodeStream(ILStubLinker::kDispatch);
- BOOL fReturnVal = !sig.IsReturnTypeVoid();
+ // Load the "real" delegate
+ pCode->EmitLoadThis();
+ pCode->EmitLDFLD(pCode->GetToken(MscorlibBinder::GetField(FIELD__MULTICAST_DELEGATE__INVOCATION_LIST)));
- SigTypeContext emptyContext;
- ILStubLinker sl(pMD->GetModule(), pMD->GetSignature(), &emptyContext, pMD, TRUE, TRUE, FALSE);
+ // Load the arguments
+ UINT paramCount = 0;
+ while(paramCount < sig.NumFixedArgs())
+ pCode->EmitLDARG(paramCount++);
- ILCodeStream *pCode = sl.NewCodeStream(ILStubLinker::kDispatch);
+ // Call the delegate
+ pCode->EmitCALL(pCode->GetToken(pMD), sig.NumFixedArgs(), fReturnVal);
- // Load the "real" delegate
- pCode->EmitLoadThis();
- pCode->EmitLDFLD(pCode->GetToken(MscorlibBinder::GetField(FIELD__MULTICAST_DELEGATE__INVOCATION_LIST)));
+ // Return
+ pCode->EmitRET();
- // Load the arguments
- UINT paramCount = 0;
- while(paramCount < sig.NumFixedArgs())
- pCode->EmitLDARG(paramCount++);
+ PCCOR_SIGNATURE pSig;
+ DWORD cbSig;
- // Call the delegate
- pCode->EmitCALL(pCode->GetToken(pMD), sig.NumFixedArgs(), fReturnVal);
+ pMD->GetSig(&pSig,&cbSig);
- // Return
- pCode->EmitRET();
+ MethodDesc* pStubMD =
+ ILStubCache::CreateAndLinkNewILStubMethodDesc(pMD->GetLoaderAllocator(),
+ pMD->GetMethodTable(),
+ ILSTUB_SECUREDELEGATE_INVOKE,
+ pMD->GetModule(),
+ pSig, cbSig,
+ NULL,
+ &sl);
- PCCOR_SIGNATURE pSig;
- DWORD cbSig;
+ pStub = Stub::NewStub(JitILStub(pStubMD));
- pMD->GetSig(&pSig,&cbSig);
+ g_IBCLogger.LogEEClassCOWTableAccess(pDelegateMT);
- MethodDesc* pStubMD =
- ILStubCache::CreateAndLinkNewILStubMethodDesc(pMD->GetLoaderAllocator(),
- pMD->GetMethodTable(),
- ILSTUB_SECUREDELEGATE_INVOKE,
- pMD->GetModule(),
- pSig, cbSig,
- NULL,
- &sl);
+ InterlockedCompareExchangeT<PTR_Stub>(EnsureWritablePages(&delegateEEClass->m_pSecureDelegateInvokeStub), pStub, NULL);
- return Stub::NewStub(JitILStub(pStubMD))->GetEntryPoint();
+ }
+ return pStub->GetEntryPoint();
#endif
}
#else // FEATURE_STUBS_AS_IL
@@ -2986,32 +3000,44 @@ PCODE COMDelegate::GetSecureInvoke(MethodDesc* pMD)
}
CONTRACT_END;
- GCX_PREEMP();
-
- MetaSig sig(pMD);
+ MethodTable * pDelegateMT = pMD->GetMethodTable();
+ DelegateEEClass* delegateEEClass = (DelegateEEClass*) pDelegateMT->GetClass();
- UINT_PTR hash = CPUSTUBLINKER::HashMulticastInvoke(&sig);
+ Stub *pStub = delegateEEClass->m_pSecureDelegateInvokeStub;
- Stub *pStub = m_pSecureDelegateStubCache->GetStub(hash);
- if (!pStub)
+ if (pStub == NULL)
{
- CPUSTUBLINKER sl;
+ GCX_PREEMP();
- LOG((LF_CORDB,LL_INFO10000, "COMD::GIMS making a multicast delegate\n"));
- sl.EmitSecureDelegateInvoke(hash);
+ MetaSig sig(pMD);
- // The cache is process-wide, based on signature. It never unloads
- Stub *pCandidate = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap(), NEWSTUB_FL_MULTICAST);
+ UINT_PTR hash = CPUSTUBLINKER::HashMulticastInvoke(&sig);
- Stub *pWinner = m_pSecureDelegateStubCache->AttemptToSetStub(hash, pCandidate);
- pCandidate->DecRef();
- if (!pWinner)
- COMPlusThrowOM();
+ pStub = m_pSecureDelegateStubCache->GetStub(hash);
+ if (!pStub)
+ {
+ CPUSTUBLINKER sl;
- LOG((LF_CORDB,LL_INFO10000, "Putting a MC stub at 0x%x (code:0x%x)\n",
- pWinner, (BYTE*)pWinner+sizeof(Stub)));
+ LOG((LF_CORDB,LL_INFO10000, "COMD::GIMS making a multicast delegate\n"));
+ sl.EmitSecureDelegateInvoke(hash);
- pStub = pWinner;
+ // The cache is process-wide, based on signature. It never unloads
+ Stub *pCandidate = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap(), NEWSTUB_FL_MULTICAST);
+
+ Stub *pWinner = m_pSecureDelegateStubCache->AttemptToSetStub(hash, pCandidate);
+ pCandidate->DecRef();
+ if (!pWinner)
+ COMPlusThrowOM();
+
+ LOG((LF_CORDB,LL_INFO10000, "Putting a MC stub at 0x%x (code:0x%x)\n",
+ pWinner, (BYTE*)pWinner+sizeof(Stub)));
+
+ pStub = pWinner;
+ }
+
+ g_IBCLogger.LogEEClassCOWTableAccess(pDelegateMT);
+ EnsureWritablePages(&delegateEEClass->m_pSecureDelegateInvokeStub);
+ delegateEEClass->m_pSecureDelegateInvokeStub = pStub;
}
RETURN (pStub->GetEntryPoint());
}
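
Both GetSecureInvoke variants above now cache the stub on the DelegateEEClass instead of rebuilding it per call. The IL-stub variant publishes with a compare-exchange so concurrent first callers race benignly; a minimal sketch of that publish pattern (BuildStub is a hypothetical stand-in for the IL-stub construction):

// Sketch: lazy one-time publish of m_pSecureDelegateInvokeStub (IL-stub path).
Stub* pStub = delegateEEClass->m_pSecureDelegateInvokeStub;   // racy read is fine
if (pStub == NULL)
{
    pStub = BuildStub();                          // hypothetical: the expensive part
    InterlockedCompareExchangeT<PTR_Stub>(
        EnsureWritablePages(&delegateEEClass->m_pSecureDelegateInvokeStub),
        pStub, NULL);                             // first writer wins; losers still
}                                                 // hold an equally valid stub
return pStub->GetEntryPoint();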
diff --git a/src/vm/comdependenthandle.cpp b/src/vm/comdependenthandle.cpp
index 0d2cac53a8..6535a804ae 100644
--- a/src/vm/comdependenthandle.cpp
+++ b/src/vm/comdependenthandle.cpp
@@ -74,3 +74,24 @@ FCIMPL3(VOID, DependentHandle::nGetPrimaryAndSecondary, OBJECTHANDLE handle, Obj
}
FCIMPLEND
+FCIMPL2(VOID, DependentHandle::nSetPrimary, OBJECTHANDLE handle, Object *_primary)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(handle != NULL);
+
+ OBJECTREF primary(_primary);
+ StoreObjectInHandle(handle, primary);
+}
+FCIMPLEND
+
+FCIMPL2(VOID, DependentHandle::nSetSecondary, OBJECTHANDLE handle, Object *_secondary)
+{
+ FCALL_CONTRACT;
+
+ _ASSERTE(handle != NULL);
+
+ OBJECTREF secondary(_secondary);
+ SetDependentHandleSecondary(handle, secondary);
+}
+FCIMPLEND
diff --git a/src/vm/comdependenthandle.h b/src/vm/comdependenthandle.h
index 7cf5a1e641..7192a4bbc3 100644
--- a/src/vm/comdependenthandle.h
+++ b/src/vm/comdependenthandle.h
@@ -45,6 +45,8 @@ public:
static FCDECL2(VOID, nGetPrimary, OBJECTHANDLE handle, Object **outPrimary);
static FCDECL3(VOID, nGetPrimaryAndSecondary, OBJECTHANDLE handle, Object **outPrimary, Object **outSecondary);
static FCDECL1(VOID, nFree, OBJECTHANDLE handle);
+ static FCDECL2(VOID, nSetPrimary, OBJECTHANDLE handle, Object *primary);
+ static FCDECL2(VOID, nSetSecondary, OBJECTHANDLE handle, Object *secondary);
};
#endif
diff --git a/src/vm/commemoryfailpoint.cpp b/src/vm/commemoryfailpoint.cpp
index 276a9f305a..4d1ed6ef64 100644
--- a/src/vm/commemoryfailpoint.cpp
+++ b/src/vm/commemoryfailpoint.cpp
@@ -26,7 +26,7 @@ FCIMPL2(void, COMMemoryFailPoint::GetMemorySettings, UINT64* pMaxGCSegmentSize,
{
FCALL_CONTRACT;
- GCHeap * pGC = GCHeap::GetGCHeap();
+ IGCHeap * pGC = GCHeapUtilities::GetGCHeap();
size_t segment_size = pGC->GetValidSegmentSize(FALSE);
size_t large_segment_size = pGC->GetValidSegmentSize(TRUE);
_ASSERTE(segment_size < SIZE_T_MAX && large_segment_size < SIZE_T_MAX);
diff --git a/src/vm/commethodrental.cpp b/src/vm/commethodrental.cpp
index 0faf470a2a..0a5c011270 100644
--- a/src/vm/commethodrental.cpp
+++ b/src/vm/commethodrental.cpp
@@ -102,9 +102,9 @@ void QCALLTYPE COMMethodRental::SwapMethodBody(EnregisteredTypeHandle cls, INT32
COMPlusThrowHR(VLDTR_E_MD_BADHEADER);
#ifdef FEATURE_INTERPRETER
- pMethodDesc->MakeJitWorker(&header, CORJIT_FLG_MAKEFINALCODE, 0);
+ pMethodDesc->MakeJitWorker(&header, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE));
#else // !FEATURE_INTERPRETER
- pMethodDesc->MakeJitWorker(&header, 0, 0);
+ pMethodDesc->MakeJitWorker(&header, CORJIT_FLAGS());
#endif // !FEATURE_INTERPRETER
}
diff --git a/src/vm/commodule.cpp b/src/vm/commodule.cpp
index 44a96d36ab..af6dc48d15 100644
--- a/src/vm/commodule.cpp
+++ b/src/vm/commodule.cpp
@@ -768,8 +768,7 @@ mdString QCALLTYPE COMModule::GetStringConstant(QCall::ModuleHandle pModule, LPC
_ASSERTE(pwzValue != NULL);
HRESULT hr = pRCW->GetEmitter()->DefineUserString(pwzValue, iLength, &strRef);
- if (FAILED(hr)) {
- _ASSERTE(hr == E_OUTOFMEMORY || !"Unknown failure in DefineUserString");
+ if (FAILED(hr)) {
COMPlusThrowHR(hr);
}
diff --git a/src/vm/common.h b/src/vm/common.h
index 123350334b..9de9f35141 100644
--- a/src/vm/common.h
+++ b/src/vm/common.h
@@ -177,7 +177,7 @@ typedef DPTR(class StringBufferObject) PTR_StringBufferObject;
typedef DPTR(class TypeHandle) PTR_TypeHandle;
typedef VPTR(class VirtualCallStubManager) PTR_VirtualCallStubManager;
typedef VPTR(class VirtualCallStubManagerManager) PTR_VirtualCallStubManagerManager;
-typedef VPTR(class GCHeap) PTR_GCHeap;
+typedef VPTR(class IGCHeap) PTR_IGCHeap;
//
// _UNCHECKED_OBJECTREF is for code that can't deal with DEBUG OBJECTREFs
diff --git a/src/vm/compile.cpp b/src/vm/compile.cpp
index 0a2c75da84..87107151eb 100644
--- a/src/vm/compile.cpp
+++ b/src/vm/compile.cpp
@@ -382,7 +382,6 @@ HRESULT CEECompileInfo::LoadAssemblyByPath(
Assembly * pAssembly;
HRESULT hrProcessLibraryBitnessMismatch = S_OK;
- bool verifyingImageIsAssembly = false;
// We don't want to do a LoadFrom, since they do not work with ngen. Instead,
// read the metadata from the file and do a bind based on that.
@@ -416,9 +415,6 @@ HRESULT CEECompileInfo::LoadAssemblyByPath(
fExplicitBindToNativeImage ? MDInternalImport_NoCache : MDInternalImport_Default);
}
-#if defined(FEATURE_WINDOWSPHONE)
- verifyingImageIsAssembly = true;
-#endif // FEATURE_WINDOWSPHONE
if (fExplicitBindToNativeImage && !pImage->HasReadyToRunHeader())
{
pImage->VerifyIsNIAssembly();
@@ -427,8 +423,6 @@ HRESULT CEECompileInfo::LoadAssemblyByPath(
{
pImage->VerifyIsAssembly();
}
-
- verifyingImageIsAssembly = false;
// Check to make sure the bitness of the assembly matches the bitness of the process
// we will be loading it into and store the result. If a COR_IMAGE_ERROR gets thrown
@@ -552,11 +546,7 @@ HRESULT CEECompileInfo::LoadAssemblyByPath(
}
EX_CATCH_HRESULT(hr);
- if (verifyingImageIsAssembly && hr != S_OK)
- {
- hr = NGEN_E_FILE_NOT_ASSEMBLY;
- }
- else if ( hrProcessLibraryBitnessMismatch != S_OK && ( hr == COR_E_BADIMAGEFORMAT || hr == HRESULT_FROM_WIN32(ERROR_BAD_EXE_FORMAT) ) )
+ if ( hrProcessLibraryBitnessMismatch != S_OK && ( hr == COR_E_BADIMAGEFORMAT || hr == HRESULT_FROM_WIN32(ERROR_BAD_EXE_FORMAT) ) )
{
hr = hrProcessLibraryBitnessMismatch;
}
@@ -1497,7 +1487,7 @@ void CEECompileInfo::CompressDebugInfo(
HRESULT CEECompileInfo::GetBaseJitFlags(
IN CORINFO_METHOD_HANDLE hMethod,
- OUT DWORD *pFlags)
+ OUT CORJIT_FLAGS *pFlags)
{
STANDARD_VM_CONTRACT;
@@ -3068,6 +3058,13 @@ private:
DWORD m_dwExtraData;
LPCWSTR m_wszManagedPDBSearchPath;
+    // Currently the DiaSymWriter does not use the correct PDB signature for NGEN PDBs unless
+    // the NGEN DLL whose symbols are being generated ends in .ni.dll. Thus we copy
+    // to this name if it does not follow this convention (as is true with ReadyToRun
+    // dlls). This variable remembers the temp file path so we can delete it after
+    // PDB generation. If DiaSymWriter is fixed, we can remove this.
+ SString m_tempSourceDllName;
+
// Interfaces for reading IL PDB info
ReleaseHolder<ISymUnmanagedBinder> m_pBinder;
ReleaseHolder<ISymUnmanagedReader> m_pReader;
@@ -3115,6 +3112,8 @@ public:
ZeroMemory(m_wszPDBFilePath, sizeof(m_wszPDBFilePath));
}
+
+ ~NGenModulePdbWriter();
HRESULT WritePDBData();
@@ -3415,6 +3414,13 @@ HRESULT NGenModulePdbWriter::InitILPdbData()
return S_OK;
}
+NGenModulePdbWriter::~NGenModulePdbWriter()
+{
+ // Delete any temporary files we created.
+ if (m_tempSourceDllName.GetCount() != 0)
+ DeleteFileW(m_tempSourceDllName);
+ m_tempSourceDllName.Clear();
+}
//---------------------------------------------------------------------------------------
//
@@ -3449,8 +3455,32 @@ HRESULT NGenModulePdbWriter::WritePDBData()
PEImageLayout * pLoadedLayout = m_pModule->GetFile()->GetLoaded();
+    // Currently DiaSymReader does not work properly when generating NGEN PDBs unless
+    // the DLL whose PDB is being generated ends in .ni.*. Unfortunately, ReadyToRun
+    // images do not follow this convention and end up producing bad PDBs. To fix
+    // this (without changing diasymreader.dll, which ships independently of .NET Core),
+    // we copy the file to a name with this convention before generating the PDB
+    // and delete it when we are done.
+ SString dllPath = pLoadedLayout->GetPath();
+ if (!dllPath.EndsWithCaseInsensitive(L".ni.dll") && !dllPath.EndsWithCaseInsensitive(L".ni.exe"))
+ {
+        SString::Iterator fileNameStart = dllPath.End();
+ dllPath.FindBack(fileNameStart, '\\');
+
+ SString::Iterator ext = dllPath.End();
+ dllPath.FindBack(ext, '.');
+
+        // m_tempSourceDllName = Conversion of INPUT.dll to INPUT.ni.dll in the directory where the PDB lives.
+ m_tempSourceDllName = m_wszPdbPath;
+ m_tempSourceDllName += SString(dllPath, fileNameStart, ext - fileNameStart);
+ m_tempSourceDllName += L".ni";
+ m_tempSourceDllName += SString(dllPath, ext, dllPath.End() - ext);
+ CopyFileW(dllPath, m_tempSourceDllName, false);
+ dllPath = m_tempSourceDllName;
+ }
+
ReleaseHolder<ISymNGenWriter> pWriter1;
- hr = m_Create(pLoadedLayout->GetPath(), m_wszPdbPath, &pWriter1);
+ hr = m_Create(dllPath, m_wszPdbPath, &pWriter1);
if (FAILED(hr))
return hr;
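
The string slicing above is easiest to follow with a concrete input (paths below are hypothetical, chosen only to illustrate the transformation):

// Worked example of the copy-to-.ni.dll workaround above:
//   dllPath       = C:\app\System.Runtime.dll      (ReadyToRun image)
//   m_wszPdbPath  = C:\pdbs
//   [fileNameStart, ext) = "\System.Runtime"       (FindBack stops on the '\')
//   m_tempSourceDllName  = "C:\pdbs" + "\System.Runtime" + ".ni" + ".dll"
//                        = C:\pdbs\System.Runtime.ni.dll
// The copy is deleted again in ~NGenModulePdbWriter() once the PDB is written.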
@@ -5423,7 +5453,7 @@ static BOOL CanSatisfyConstraints(Instantiation typicalInst, Instantiation candi
StackScratchBuffer buffer;
thArg.GetName(candidateInstName);
char output[1024];
- sprintf(output, "Generics TypeDependencyAttribute processing: Couldn't satisfy a constraint. Class with Attribute: %s Bad candidate instantiated type: %s\r\n", pMT->GetDebugClassName(), candidateInstName.GetANSI(buffer));
+ _snprintf_s(output, _countof(output), _TRUNCATE, "Generics TypeDependencyAttribute processing: Couldn't satisfy a constraint. Class with Attribute: %s Bad candidate instantiated type: %s\r\n", pMT->GetDebugClassName(), candidateInstName.GetANSI(buffer));
OutputDebugStringA(output);
*/
#endif
@@ -6573,7 +6603,9 @@ void CEEPreloader::PrePrepareMethodIfNecessary(CORINFO_METHOD_HANDLE hMethod)
{
STANDARD_VM_CONTRACT;
+#ifdef FEATURE_CER
::PrePrepareMethodIfNecessary(hMethod);
+#endif
}
static void SetStubMethodDescOnInteropMethodDesc(MethodDesc* pInteropMD, MethodDesc* pStubMD, bool fReverseStub)
@@ -6650,9 +6682,9 @@ MethodDesc * CEEPreloader::CompileMethodStubIfNeeded(
{
if (!pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->IsCompiled())
{
- DWORD dwJitFlags = pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
+ CORJIT_FLAGS jitFlags = pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
- pfnCallback(pCallbackContext, (CORINFO_METHOD_HANDLE)pStubMD, dwJitFlags);
+ pfnCallback(pCallbackContext, (CORINFO_METHOD_HANDLE)pStubMD, jitFlags);
}
#ifndef FEATURE_FULL_NGEN // Deduplication
diff --git a/src/vm/compile.h b/src/vm/compile.h
index 19bbac3228..8ee66dbec8 100644
--- a/src/vm/compile.h
+++ b/src/vm/compile.h
@@ -377,7 +377,7 @@ class CEECompileInfo : public ICorCompileInfo
HRESULT GetBaseJitFlags(
IN CORINFO_METHOD_HANDLE hMethod,
- OUT DWORD *pFlags);
+ OUT CORJIT_FLAGS *pFlags);
#ifdef _WIN64
SIZE_T getPersonalityValue();
diff --git a/src/vm/comsynchronizable.cpp b/src/vm/comsynchronizable.cpp
index ef195bf5de..e62ec13dde 100644
--- a/src/vm/comsynchronizable.cpp
+++ b/src/vm/comsynchronizable.cpp
@@ -1604,7 +1604,7 @@ FCIMPL0(Object*, ThreadNative::GetDomain)
}
FCIMPLEND
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && defined(_MSC_VER)
__declspec(naked) LPVOID __fastcall ThreadNative::FastGetDomain()
{
STATIC_CONTRACT_MODE_COOPERATIVE;
@@ -1624,7 +1624,7 @@ done:
ret
}
}
-#else // _TARGET_X86_
+#else // _TARGET_X86_ && _MSC_VER
LPVOID F_CALL_CONV ThreadNative::FastGetDomain()
{
CONTRACTL
@@ -1650,7 +1650,7 @@ LPVOID F_CALL_CONV ThreadNative::FastGetDomain()
}
return NULL;
}
-#endif // _TARGET_X86_
+#endif // _TARGET_X86_ && _MSC_VER
#ifdef FEATURE_REMOTING
// This is just a helper method that lets BCL get to the managed context
diff --git a/src/vm/comthreadpool.cpp b/src/vm/comthreadpool.cpp
index 7f629b508b..a4c7e75064 100644
--- a/src/vm/comthreadpool.cpp
+++ b/src/vm/comthreadpool.cpp
@@ -632,6 +632,7 @@ void SetAsyncResultProperties(
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_SO_TOLERANT;
+#ifndef FEATURE_CORECLR
ASYNCRESULTREF asyncResult = overlapped->m_asyncResult;
// only filestream is expected to have a null delegate in which
// case we do the necessary book-keeping here. However, for robustness
@@ -655,6 +656,7 @@ void SetAsyncResultProperties(
if ((h != NULL) && (h != (HANDLE) -1))
UnsafeSetEvent(h);
}
+#endif // !FEATURE_CORECLR
}
VOID BindIoCompletionCallBack_Worker(LPVOID args)
@@ -663,11 +665,11 @@ VOID BindIoCompletionCallBack_Worker(LPVOID args)
STATIC_CONTRACT_GC_TRIGGERS;
STATIC_CONTRACT_MODE_ANY;
STATIC_CONTRACT_SO_INTOLERANT;
-
+
DWORD ErrorCode = ((BindIoCompletion_Args *)args)->ErrorCode;
DWORD numBytesTransferred = ((BindIoCompletion_Args *)args)->numBytesTransferred;
LPOVERLAPPED lpOverlapped = ((BindIoCompletion_Args *)args)->lpOverlapped;
-
+
OVERLAPPEDDATAREF overlapped = ObjectToOVERLAPPEDDATAREF(OverlappedDataObject::GetOverlapped(lpOverlapped));
GCPROTECT_BEGIN(overlapped);
@@ -682,7 +684,7 @@ VOID BindIoCompletionCallBack_Worker(LPVOID args)
if (overlapped->m_iocb != NULL)
{
            // Caution: the args are not protected, we have to guarantee there's no GC from here till the managed call below
- PREPARE_NONVIRTUAL_CALLSITE(METHOD__IOCB_HELPER__PERFORM_IOCOMPLETION_CALLBACK);
+ PREPARE_NONVIRTUAL_CALLSITE(METHOD__IOCB_HELPER__PERFORM_IOCOMPLETION_CALLBACK);
DECLARE_ARGHOLDER_ARRAY(arg, 3);
arg[ARGNUM_0] = DWORD_TO_ARGHOLDER(ErrorCode);
arg[ARGNUM_1] = DWORD_TO_ARGHOLDER(numBytesTransferred);
@@ -692,21 +694,23 @@ VOID BindIoCompletionCallBack_Worker(LPVOID args)
CALL_MANAGED_METHOD_NORET(arg);
}
else
- { // no user delegate to callback
+ {
+ // no user delegate to callback
_ASSERTE((overlapped->m_iocbHelper == NULL) || !"This is benign, but should be optimized");
+#ifndef FEATURE_CORECLR
// we cannot do this at threadpool initialization time since mscorlib may not have been loaded
if (!g_pAsyncFileStream_AsyncResultClass)
{
g_pAsyncFileStream_AsyncResultClass = MscorlibBinder::GetClass(CLASS__FILESTREAM_ASYNCRESULT);
}
+#endif // !FEATURE_CORECLR
SetAsyncResultProperties(overlapped, ErrorCode, numBytesTransferred);
}
GCPROTECT_END();
}
-
void __stdcall BindIoCompletionCallbackStubEx(DWORD ErrorCode,
DWORD numBytesTransferred,
LPOVERLAPPED lpOverlapped,
@@ -769,9 +773,6 @@ void __stdcall BindIoCompletionCallbackStubEx(DWORD ErrorCode,
ManagedThreadBase::ThreadPool(ADID(overlapped->GetAppDomainId()), BindIoCompletionCallBack_Worker, &args);
}
-
-
-
LOG((LF_INTEROP, LL_INFO10000, "Leaving IO_CallBackStub thread 0x%x retCode 0x%x, overlap 0x%x\n", pThread, ErrorCode, lpOverlapped));
// We should have released all locks.
_ASSERTE(g_fEEShutDown || pThread->m_dwLockCount == 0 || pThread->m_fRudeAborted);
diff --git a/src/vm/comutilnative.cpp b/src/vm/comutilnative.cpp
index b55c63549d..41655cb5b0 100644
--- a/src/vm/comutilnative.cpp
+++ b/src/vm/comutilnative.cpp
@@ -27,7 +27,7 @@
#include "frames.h"
#include "field.h"
#include "winwrap.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "fcall.h"
#include "invokeutil.h"
#include "eeconfig.h"
@@ -1478,7 +1478,11 @@ FCIMPL5(VOID, Buffer::BlockCopy, ArrayBase *src, int srcOffset, ArrayBase *dst,
PTR_BYTE dstPtr = dst->GetDataPtr() + dstOffset;
if ((srcPtr != dstPtr) && (count > 0)) {
+#if defined(_AMD64_) && !defined(PLATFORM_UNIX)
+ JIT_MemCpy(dstPtr, srcPtr, count);
+#else
memmove(dstPtr, srcPtr, count);
+#endif
}
FC_GC_POLL();
@@ -1524,7 +1528,11 @@ FCIMPL5(VOID, Buffer::InternalBlockCopy, ArrayBase *src, int srcOffset, ArrayBas
_ASSERTE(count >= 0);
// Copy the data.
+#if defined(_AMD64_) && !defined(PLATFORM_UNIX)
+ JIT_MemCpy(dst->GetDataPtr() + dstOffset, src->GetDataPtr() + srcOffset, count);
+#else
memmove(dst->GetDataPtr() + dstOffset, src->GetDataPtr() + srcOffset, count);
+#endif
FC_GC_POLL();
}
@@ -1638,7 +1646,7 @@ FCIMPL0(int, GCInterface::GetGcLatencyMode)
FC_GC_POLL_NOT_NEEDED();
- int result = (INT32)GCHeap::GetGCHeap()->GetGcLatencyMode();
+ int result = (INT32)GCHeapUtilities::GetGCHeap()->GetGcLatencyMode();
return result;
}
FCIMPLEND
@@ -1649,7 +1657,7 @@ FCIMPL1(int, GCInterface::SetGcLatencyMode, int newLatencyMode)
FC_GC_POLL_NOT_NEEDED();
- return GCHeap::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
+ return GCHeapUtilities::GetGCHeap()->SetGcLatencyMode(newLatencyMode);
}
FCIMPLEND
@@ -1659,7 +1667,7 @@ FCIMPL0(int, GCInterface::GetLOHCompactionMode)
FC_GC_POLL_NOT_NEEDED();
- int result = (INT32)GCHeap::GetGCHeap()->GetLOHCompactionMode();
+ int result = (INT32)GCHeapUtilities::GetGCHeap()->GetLOHCompactionMode();
return result;
}
FCIMPLEND
@@ -1670,7 +1678,7 @@ FCIMPL1(void, GCInterface::SetLOHCompactionMode, int newLOHCompactionyMode)
FC_GC_POLL_NOT_NEEDED();
- GCHeap::GetGCHeap()->SetLOHCompactionMode(newLOHCompactionyMode);
+ GCHeapUtilities::GetGCHeap()->SetLOHCompactionMode(newLOHCompactionyMode);
}
FCIMPLEND
@@ -1681,7 +1689,7 @@ FCIMPL2(FC_BOOL_RET, GCInterface::RegisterForFullGCNotification, UINT32 gen2Perc
FC_GC_POLL_NOT_NEEDED();
- FC_RETURN_BOOL(GCHeap::GetGCHeap()->RegisterForFullGCNotification(gen2Percentage, lohPercentage));
+ FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->RegisterForFullGCNotification(gen2Percentage, lohPercentage));
}
FCIMPLEND
@@ -1690,7 +1698,7 @@ FCIMPL0(FC_BOOL_RET, GCInterface::CancelFullGCNotification)
FCALL_CONTRACT;
FC_GC_POLL_NOT_NEEDED();
- FC_RETURN_BOOL(GCHeap::GetGCHeap()->CancelFullGCNotification());
+ FC_RETURN_BOOL(GCHeapUtilities::GetGCHeap()->CancelFullGCNotification());
}
FCIMPLEND
@@ -1711,7 +1719,7 @@ FCIMPL1(int, GCInterface::WaitForFullGCApproach, int millisecondsTimeout)
HELPER_METHOD_FRAME_BEGIN_RET_0();
DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
- result = GCHeap::GetGCHeap()->WaitForFullGCApproach(dwMilliseconds);
+ result = GCHeapUtilities::GetGCHeap()->WaitForFullGCApproach(dwMilliseconds);
HELPER_METHOD_FRAME_END();
@@ -1736,7 +1744,7 @@ FCIMPL1(int, GCInterface::WaitForFullGCComplete, int millisecondsTimeout)
HELPER_METHOD_FRAME_BEGIN_RET_0();
DWORD dwMilliseconds = ((millisecondsTimeout == -1) ? INFINITE : millisecondsTimeout);
- result = GCHeap::GetGCHeap()->WaitForFullGCComplete(dwMilliseconds);
+ result = GCHeapUtilities::GetGCHeap()->WaitForFullGCComplete(dwMilliseconds);
HELPER_METHOD_FRAME_END();
@@ -1757,7 +1765,7 @@ FCIMPL1(int, GCInterface::GetGeneration, Object* objUNSAFE)
if (objUNSAFE == NULL)
FCThrowArgumentNull(W("obj"));
- int result = (INT32)GCHeap::GetGCHeap()->WhichGeneration(objUNSAFE);
+ int result = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(objUNSAFE);
FC_GC_POLL_RET();
return result;
}
@@ -1777,7 +1785,7 @@ FCIMPL2(int, GCInterface::CollectionCount, INT32 generation, INT32 getSpecialGCC
_ASSERTE(generation >= 0);
//We don't need to check the top end because the GC will take care of that.
- int result = (INT32)GCHeap::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
+ int result = (INT32)GCHeapUtilities::GetGCHeap()->CollectionCount(generation, getSpecialGCCount);
FC_GC_POLL_RET();
return result;
}
@@ -1793,7 +1801,7 @@ int QCALLTYPE GCInterface::StartNoGCRegion(INT64 totalSize, BOOL lohSizeKnown, I
GCX_COOP();
- retVal = GCHeap::GetGCHeap()->StartNoGCRegion((ULONGLONG)totalSize,
+ retVal = GCHeapUtilities::GetGCHeap()->StartNoGCRegion((ULONGLONG)totalSize,
lohSizeKnown,
(ULONGLONG)lohSize,
disallowFullBlockingGC);
@@ -1811,7 +1819,7 @@ int QCALLTYPE GCInterface::EndNoGCRegion()
BEGIN_QCALL;
- retVal = GCHeap::GetGCHeap()->EndNoGCRegion();
+ retVal = GCHeapUtilities::GetGCHeap()->EndNoGCRegion();
END_QCALL;
@@ -1837,7 +1845,7 @@ FCIMPL1(int, GCInterface::GetGenerationWR, LPVOID handle)
if (temp == NULL)
COMPlusThrowArgumentNull(W("weak handle"));
- iRetVal = (INT32)GCHeap::GetGCHeap()->WhichGeneration(OBJECTREFToObject(temp));
+ iRetVal = (INT32)GCHeapUtilities::GetGCHeap()->WhichGeneration(OBJECTREFToObject(temp));
HELPER_METHOD_FRAME_END();
@@ -1860,7 +1868,7 @@ INT64 QCALLTYPE GCInterface::GetTotalMemory()
BEGIN_QCALL;
GCX_COOP();
- iRetVal = (INT64) GCHeap::GetGCHeap()->GetTotalBytesInUse();
+ iRetVal = (INT64) GCHeapUtilities::GetGCHeap()->GetTotalBytesInUse();
END_QCALL;
@@ -1885,7 +1893,7 @@ void QCALLTYPE GCInterface::Collect(INT32 generation, INT32 mode)
//We don't need to check the top end because the GC will take care of that.
GCX_COOP();
- GCHeap::GetGCHeap()->GarbageCollect(generation, FALSE, mode);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, FALSE, mode);
END_QCALL;
}
@@ -1918,7 +1926,7 @@ FCIMPL0(int, GCInterface::GetMaxGeneration)
{
FCALL_CONTRACT;
- return(INT32)GCHeap::GetGCHeap()->GetMaxGeneration();
+ return(INT32)GCHeapUtilities::GetGCHeap()->GetMaxGeneration();
}
FCIMPLEND
@@ -1934,7 +1942,7 @@ FCIMPL0(INT64, GCInterface::GetAllocatedBytesForCurrentThread)
INT64 currentAllocated = 0;
Thread *pThread = GetThread();
- alloc_context* ac = pThread->GetAllocContext();
+ gc_alloc_context* ac = pThread->GetAllocContext();
currentAllocated = ac->alloc_bytes + ac->alloc_bytes_loh - (ac->alloc_limit - ac->alloc_ptr);
return currentAllocated;
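
The arithmetic above deserves a gloss: alloc_bytes is charged a whole allocation quantum each time the thread's context is refilled, so the still-unused tail of the current quantum (alloc_limit - alloc_ptr) must be backed out. With illustrative numbers (not from the diff):

// Illustrative numbers for the per-thread allocated-bytes computation above:
//   ac->alloc_bytes                 = 96 KB   // SOH quanta charged so far
//   ac->alloc_bytes_loh             =  0
//   ac->alloc_limit - ac->alloc_ptr = 16 KB   // handed out but not yet consumed
//   currentAllocated = 96 KB + 0 - 16 KB = 80 KB actually allocated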
@@ -1956,7 +1964,7 @@ FCIMPL1(void, GCInterface::SuppressFinalize, Object *obj)
if (!obj->GetMethodTable ()->HasFinalizer())
return;
- GCHeap::GetGCHeap()->SetFinalizationRun(obj);
+ GCHeapUtilities::GetGCHeap()->SetFinalizationRun(obj);
FC_GC_POLL();
}
FCIMPLEND
@@ -1977,7 +1985,7 @@ FCIMPL1(void, GCInterface::ReRegisterForFinalize, Object *obj)
if (obj->GetMethodTable()->HasFinalizer())
{
HELPER_METHOD_FRAME_BEGIN_1(obj);
- GCHeap::GetGCHeap()->RegisterForFinalization(-1, obj);
+ GCHeapUtilities::GetGCHeap()->RegisterForFinalization(-1, obj);
HELPER_METHOD_FRAME_END();
}
}
@@ -2079,7 +2087,7 @@ void GCInterface::AddMemoryPressure(UINT64 bytesAllocated)
m_ulThreshold = (addMethod > multMethod) ? addMethod : multMethod;
for (int i = 0; i <= 1; i++)
{
- if ((GCHeap::GetGCHeap()->CollectionCount(i) / RELATIVE_GC_RATIO) > GCHeap::GetGCHeap()->CollectionCount(i + 1))
+ if ((GCHeapUtilities::GetGCHeap()->CollectionCount(i) / RELATIVE_GC_RATIO) > GCHeapUtilities::GetGCHeap()->CollectionCount(i + 1))
{
gen_collect = i + 1;
break;
@@ -2089,14 +2097,14 @@ void GCInterface::AddMemoryPressure(UINT64 bytesAllocated)
PREFIX_ASSUME(gen_collect <= 2);
- if ((gen_collect == 0) || (m_gc_counts[gen_collect] == GCHeap::GetGCHeap()->CollectionCount(gen_collect)))
+ if ((gen_collect == 0) || (m_gc_counts[gen_collect] == GCHeapUtilities::GetGCHeap()->CollectionCount(gen_collect)))
{
GarbageCollectModeAny(gen_collect);
}
for (int i = 0; i < 3; i++)
{
- m_gc_counts [i] = GCHeap::GetGCHeap()->CollectionCount(i);
+ m_gc_counts [i] = GCHeapUtilities::GetGCHeap()->CollectionCount(i);
}
}
}
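
For readers skimming the hunk above: the heuristic walks the two youngest generations and escalates to generation i+1 whenever generation i has been collected more than RELATIVE_GC_RATIO times as often as i+1, then triggers a non-blocking collection only if the chosen generation has not been collected since the counts were last cached. A condensed, hedged restatement (the default of 0 and the definition of RELATIVE_GC_RATIO live outside this hunk and are assumptions here):

    // Picks the generation that added memory pressure should collect.
    static int PickGenerationForPressure(IGCHeap* pHeap)
    {
        int genToCollect = 0; // assumed default when no generation lags behind
        for (int i = 0; i <= 1; i++)
        {
            // Gen i collected disproportionately often => nudge gen i+1.
            if ((pHeap->CollectionCount(i) / RELATIVE_GC_RATIO) > pHeap->CollectionCount(i + 1))
            {
                genToCollect = i + 1;
                break;
            }
        }
        return genToCollect; // always <= 2, matching the PREFIX_ASSUME above
    }
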
@@ -2115,7 +2123,7 @@ void GCInterface::CheckCollectionCount()
{
LIMITED_METHOD_CONTRACT;
- GCHeap * pHeap = GCHeap::GetGCHeap();
+ IGCHeap * pHeap = GCHeapUtilities::GetGCHeap();
if (m_gc_counts[2] != pHeap->CollectionCount(2))
{
@@ -2200,7 +2208,7 @@ void GCInterface::NewAddMemoryPressure(UINT64 bytesAllocated)
// If still over budget, check current managed heap size
if (newMemValue >= budget)
{
- GCHeap *pGCHeap = GCHeap::GetGCHeap();
+ IGCHeap *pGCHeap = GCHeapUtilities::GetGCHeap();
UINT64 heapOver3 = pGCHeap->GetCurrentObjSize() / 3;
if (budget < heapOver3) // Max
@@ -2274,7 +2282,7 @@ void GCInterface::RemoveMemoryPressure(UINT64 bytesAllocated)
for (int i = 0; i < 3; i++)
{
- m_gc_counts [i] = GCHeap::GetGCHeap()->CollectionCount(i);
+ m_gc_counts [i] = GCHeapUtilities::GetGCHeap()->CollectionCount(i);
}
}
}
@@ -2348,7 +2356,7 @@ NOINLINE void GCInterface::GarbageCollectModeAny(int generation)
CONTRACTL_END;
GCX_COOP();
- GCHeap::GetGCHeap()->GarbageCollect(generation, FALSE, collection_non_blocking);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(generation, FALSE, collection_non_blocking);
}
//
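
The substitution running through this file (and most files below) is mechanical but deliberate: call sites stop naming the concrete GCHeap class and instead reach the heap through GCHeapUtilities, which returns it as an IGCHeap*. A minimal sketch of the accessor shape those call sites assume (g_pGCHeap is declared as a GPTR of IGCHeap in the crossgencompile.cpp hunk below; the bodies here are illustrative, not the real gcheaputilities.h):

    // Illustrative only: process-wide heap access behind a utility class, so
    // the rest of the VM depends on the IGCHeap interface rather than the
    // GC's concrete GCHeap implementation.
    class GCHeapUtilities
    {
    public:
        static IGCHeap* GetGCHeap()
        {
            _ASSERTE(g_pGCHeap != nullptr); // only valid after EE/GC startup
            return g_pGCHeap;
        }

        static bool IsGCHeapInitialized() { return g_pGCHeap != nullptr; }

        // IsGCInProgress() / IsServerHeap(), used elsewhere in this diff,
        // would be static wrappers here as well.
    };
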
diff --git a/src/vm/constrainedexecutionregion.cpp b/src/vm/constrainedexecutionregion.cpp
index d256c43424..77b944c416 100644
--- a/src/vm/constrainedexecutionregion.cpp
+++ b/src/vm/constrainedexecutionregion.cpp
@@ -1745,10 +1745,6 @@ void PrepopulateGenericHandleCache(DictionaryLayout *pDictionaryLayout,
MethodDesc *pMD,
MethodTable *pMT)
{
-#ifdef FEATURE_CORECLR
- // Disable this function in CoreCLR to work around https://github.com/dotnet/corefx/issues/12412.
- LIMITED_METHOD_CONTRACT;
-#else
CONTRACTL {
THROWS;
GC_TRIGGERS;
@@ -1772,7 +1768,6 @@ void PrepopulateGenericHandleCache(DictionaryLayout *pDictionaryLayout,
}
pOverflows = pOverflows->GetNextLayout();
}
-#endif // FEATURE_CORECLR
}
#ifdef FEATURE_PREJIT
diff --git a/src/vm/constrainedexecutionregion.h b/src/vm/constrainedexecutionregion.h
index 93ceb63010..4b41b2570e 100644
--- a/src/vm/constrainedexecutionregion.h
+++ b/src/vm/constrainedexecutionregion.h
@@ -13,6 +13,7 @@
#ifndef __CONSTRAINED_EXECUTION_REGION_H
#define __CONSTRAINED_EXECUTION_REGION_H
+#ifdef FEATURE_CER
#include <corhlpr.h>
#include <typestring.h>
@@ -560,4 +561,6 @@ private:
#endif
};
+#endif // FEATURE_CER
+
#endif
diff --git a/src/vm/corhost.cpp b/src/vm/corhost.cpp
index c229a0ee07..6091bad9e2 100644
--- a/src/vm/corhost.cpp
+++ b/src/vm/corhost.cpp
@@ -5170,7 +5170,7 @@ public:
HRESULT hr = S_OK;
- if (Generation > (int) GCHeap::GetGCHeap()->GetMaxGeneration())
+ if (Generation > (int) GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
hr = E_INVALIDARG;
if (SUCCEEDED(hr))
@@ -5188,7 +5188,7 @@ public:
EX_TRY
{
STRESS_LOG0(LF_GC, LL_INFO100, "Host triggers GC\n");
- hr = GCHeap::GetGCHeap()->GarbageCollect(Generation);
+ hr = GCHeapUtilities::GetGCHeap()->GarbageCollect(Generation);
}
EX_CATCH
{
@@ -5354,7 +5354,7 @@ HRESULT CCLRGCManager::_SetGCSegmentSize(SIZE_T SegmentSize)
HRESULT hr = S_OK;
// Sanity check the value; it must be a power of two and big enough.
- if (!GCHeap::IsValidSegmentSize(SegmentSize))
+ if (!GCHeapUtilities::GetGCHeap()->IsValidSegmentSize(SegmentSize))
{
hr = E_INVALIDARG;
}
@@ -5380,7 +5380,7 @@ HRESULT CCLRGCManager::_SetGCMaxGen0Size(SIZE_T MaxGen0Size)
HRESULT hr = S_OK;
// Sanity check that the value is large enough.
- if (!GCHeap::IsValidGen0MaxSize(MaxGen0Size))
+ if (!GCHeapUtilities::GetGCHeap()->IsValidGen0MaxSize(MaxGen0Size))
{
hr = E_INVALIDARG;
}
@@ -6408,7 +6408,7 @@ HRESULT CCLRDebugManager::SetConnectionTasks(
}
// Check for Finalizer thread
- if (GCHeap::IsGCHeapInitialized() && (pThread == FinalizerThread::GetFinalizerThread()))
+ if (GCHeapUtilities::IsGCHeapInitialized() && (pThread == FinalizerThread::GetFinalizerThread()))
{
// _ASSERTE(!"Host should not try to schedule user code on our Finalizer Thread");
IfFailGo(E_INVALIDARG);
diff --git a/src/vm/crossgen/CMakeLists.txt b/src/vm/crossgen/CMakeLists.txt
index c2392a2d9a..c6ef163d53 100644
--- a/src/vm/crossgen/CMakeLists.txt
+++ b/src/vm/crossgen/CMakeLists.txt
@@ -16,7 +16,6 @@ set(VM_CROSSGEN_SOURCES
../comdelegate.cpp
../codeman.cpp
../compile.cpp
- ../constrainedexecutionregion.cpp
../custommarshalerinfo.cpp
../domainfile.cpp
../baseassemblyspec.cpp
@@ -97,6 +96,12 @@ set(VM_CROSSGEN_SOURCES
../crossgencompile.cpp
)
+if(FEATURE_CER)
+ list(APPEND VM_CROSSGEN_SOURCES
+ ../constrainedexecutionregion.cpp
+ )
+endif(FEATURE_CER)
+
if(FEATURE_READYTORUN)
list(APPEND VM_CROSSGEN_SOURCES
../readytoruninfo.cpp
diff --git a/src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj b/src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj
index be65670654..67e6f4acd2 100644
--- a/src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj
+++ b/src/vm/crossgen_mscorlib/mscorlib_crossgen.nativeproj
@@ -1,5 +1,12 @@
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003" ToolsVersion="dogfood">
+ <PropertyGroup>
+ <!-- Work around problems with loading System.Private.CoreLib.dll, -->
+ <!-- caused by inconsistent setting of UseLegacyCompiler and FeatureSpanOfT -->
+ <!-- between System.Private.CoreLib.dll and the runtime. -->
+ <UseLegacyCompiler>true</UseLegacyCompiler>
+ </PropertyGroup>
+
<Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\xplat\SetCrossGen.props" />
<PropertyGroup>
diff --git a/src/vm/crossgencompile.cpp b/src/vm/crossgencompile.cpp
index 85859c2d82..ffb025adb0 100644
--- a/src/vm/crossgencompile.cpp
+++ b/src/vm/crossgencompile.cpp
@@ -130,7 +130,7 @@ BOOL __SwitchToThread(DWORD, DWORD)
// Globals and misc other
//
-GPTR_IMPL(GCHeap,g_pGCHeap);
+GPTR_IMPL(IGCHeap,g_pGCHeap);
BOOL g_fEEOtherStartup=FALSE;
BOOL g_fEEComActivatedStartup=FALSE;
@@ -138,7 +138,7 @@ BOOL g_fEEComActivatedStartup=FALSE;
GVAL_IMPL_INIT(DWORD, g_fHostConfig, 0);
#ifdef FEATURE_SVR_GC
-SVAL_IMPL_INIT(uint32_t,GCHeap,gcHeapType,GCHeap::GC_HEAP_WKS);
+SVAL_IMPL_INIT(uint32_t,IGCHeap,gcHeapType,IGCHeap::GC_HEAP_WKS);
#endif
void UpdateGCSettingFromHost()
diff --git a/src/vm/crst.cpp b/src/vm/crst.cpp
index a72ec9d3c0..7bf9bd65da 100644
--- a/src/vm/crst.cpp
+++ b/src/vm/crst.cpp
@@ -627,7 +627,7 @@ void CrstBase::PreEnter()
|| (pThread != NULL && pThread->PreemptiveGCDisabled())
// If GC heap has not been initialized yet, there is no need to synchronize with GC.
// This check is mainly for code called from EEStartup.
- || (pThread == NULL && !GCHeap::IsGCHeapInitialized()) );
+ || (pThread == NULL && !GCHeapUtilities::IsGCHeapInitialized()) );
}
if ((pThread != NULL) &&
@@ -910,7 +910,7 @@ BOOL CrstBase::IsSafeToTake()
_ASSERTE(pThread == NULL ||
(pThread->PreemptiveGCDisabled() == ((m_dwFlags & CRST_UNSAFE_COOPGC) != 0)) ||
((m_dwFlags & (CRST_UNSAFE_ANYMODE | CRST_GC_NOTRIGGER_WHEN_TAKEN)) != 0) ||
- (GCHeap::IsGCInProgress() && pThread == ThreadSuspend::GetSuspensionThread()));
+ (GCHeapUtilities::IsGCInProgress() && pThread == ThreadSuspend::GetSuspensionThread()));
END_GETTHREAD_ALLOWED;
if (m_holderthreadid.IsCurrentThread())
diff --git a/src/vm/customattribute.cpp b/src/vm/customattribute.cpp
index 48d79a2271..a83815f8bf 100644
--- a/src/vm/customattribute.cpp
+++ b/src/vm/customattribute.cpp
@@ -141,58 +141,56 @@ CustomAttributeManagedValues Attribute::GetManagedCaValue(CaValue* pCaVal)
CustomAttributeManagedValues gc;
ZeroMemory(&gc, sizeof(gc));
-
- CorSerializationType type = pCaVal->type.tag;
-
- if (type == SERIALIZATION_TYPE_ENUM)
- {
- gc.string = StringObject::NewString(pCaVal->type.szEnumName, pCaVal->type.cEnumName);
- }
- else if (type == SERIALIZATION_TYPE_STRING)
- {
- gc.string = NULL;
+ GCPROTECT_BEGIN(gc)
+ {
+ CorSerializationType type = pCaVal->type.tag;
- if (pCaVal->str.pStr)
- gc.string = StringObject::NewString(pCaVal->str.pStr, pCaVal->str.cbStr);
- }
- else if (type == SERIALIZATION_TYPE_TYPE)
- {
- gc.string = StringObject::NewString(pCaVal->str.pStr, pCaVal->str.cbStr);
- }
- else if (type == SERIALIZATION_TYPE_SZARRAY)
- {
- CorSerializationType arrayType = pCaVal->type.arrayType;
- ULONG length = pCaVal->arr.length;
- BOOL bAllBlittableCa = arrayType != SERIALIZATION_TYPE_ENUM;
+ if (type == SERIALIZATION_TYPE_ENUM)
+ {
+ gc.string = StringObject::NewString(pCaVal->type.szEnumName, pCaVal->type.cEnumName);
+ }
+ else if (type == SERIALIZATION_TYPE_STRING)
+ {
+ gc.string = NULL;
+
+ if (pCaVal->str.pStr)
+ gc.string = StringObject::NewString(pCaVal->str.pStr, pCaVal->str.cbStr);
+ }
+ else if (type == SERIALIZATION_TYPE_TYPE)
+ {
+ gc.string = StringObject::NewString(pCaVal->str.pStr, pCaVal->str.cbStr);
+ }
+ else if (type == SERIALIZATION_TYPE_SZARRAY)
+ {
+ CorSerializationType arrayType = pCaVal->type.arrayType;
+ ULONG length = pCaVal->arr.length;
+ BOOL bAllBlittableCa = arrayType != SERIALIZATION_TYPE_ENUM;
- if (length == (ULONG)-1)
- return gc;
-
- gc.array = (CaValueArrayREF)AllocateValueSzArray(MscorlibBinder::GetClass(CLASS__CUSTOM_ATTRIBUTE_ENCODED_ARGUMENT), length);
- CustomAttributeValue* pValues = gc.array->GetDirectPointerToNonObjectElements();
+ if (arrayType == SERIALIZATION_TYPE_ENUM)
+ gc.string = StringObject::NewString(pCaVal->type.szEnumName, pCaVal->type.cEnumName);
- for (COUNT_T i = 0; i < length; i ++)
- Attribute::SetBlittableCaValue(&pValues[i], &pCaVal->arr[i], &bAllBlittableCa);
+ if (length != (ULONG)-1)
+ {
+ gc.array = (CaValueArrayREF)AllocateValueSzArray(MscorlibBinder::GetClass(CLASS__CUSTOM_ATTRIBUTE_ENCODED_ARGUMENT), length);
+ CustomAttributeValue* pValues = gc.array->GetDirectPointerToNonObjectElements();
- if (!bAllBlittableCa)
- {
- GCPROTECT_BEGIN(gc)
- {
- if (arrayType == SERIALIZATION_TYPE_ENUM)
- gc.string = StringObject::NewString(pCaVal->type.szEnumName, pCaVal->type.cEnumName);
-
for (COUNT_T i = 0; i < length; i ++)
+ Attribute::SetBlittableCaValue(&pValues[i], &pCaVal->arr[i], &bAllBlittableCa);
+
+ if (!bAllBlittableCa)
{
- CustomAttributeManagedValues managedCaValue = Attribute::GetManagedCaValue(&pCaVal->arr[i]);
- Attribute::SetManagedValue(
- managedCaValue,
- &gc.array->GetDirectPointerToNonObjectElements()[i]);
+ for (COUNT_T i = 0; i < length; i ++)
+ {
+ CustomAttributeManagedValues managedCaValue = Attribute::GetManagedCaValue(&pCaVal->arr[i]);
+ Attribute::SetManagedValue(
+ managedCaValue,
+ &gc.array->GetDirectPointerToNonObjectElements()[i]);
+ }
}
}
- GCPROTECT_END();
}
}
-
+ GCPROTECT_END();
return gc;
}
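
The point of the restructure above is root reporting, not control flow: gc.string and gc.array are object references in a stack-local struct, and the old code protected them only on the one nested path that recursed. With GCPROTECT_BEGIN/END now bracketing every allocation, a collection triggered inside StringObject::NewString or AllocateValueSzArray can relocate the referenced objects and fix the locals up. A schematic of the pattern (macro and allocator names from this hunk):

    CustomAttributeManagedValues gc;
    ZeroMemory(&gc, sizeof(gc));
    GCPROTECT_BEGIN(gc);   // registers gc's references as GC roots
    {
        // Any of these calls may trigger a collection; because gc is
        // protected, its references are updated if the objects move.
        gc.string = StringObject::NewString(pStr, cbStr);
    }
    GCPROTECT_END();
    return gc;             // safe: reads happen after any fix-ups
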
@@ -908,6 +906,7 @@ FCIMPL5(VOID, COMCustomAttribute::ParseAttributeUsageAttribute, PVOID pData, ULO
}
FCIMPLEND
+#ifdef FEATURE_CAS_POLICY
FCIMPL4(VOID, COMCustomAttribute::GetSecurityAttributes, ReflectModuleBaseObject *pModuleUNSAFE, DWORD tkToken, CLR_BOOL fAssembly, PTRARRAYREF* ppArray)
{
FCALL_CONTRACT;
@@ -993,6 +992,7 @@ FCIMPL4(VOID, COMCustomAttribute::GetSecurityAttributes, ReflectModuleBaseObject
HELPER_METHOD_FRAME_END();
}
FCIMPLEND
+#endif // FEATURE_CAS_POLICY
FCIMPL7(void, COMCustomAttribute::GetPropertyOrFieldData, ReflectModuleBaseObject *pModuleUNSAFE, BYTE** ppBlobStart, BYTE* pBlobEnd, STRINGREF* pName, CLR_BOOL* pbIsProperty, OBJECTREF* pType, OBJECTREF* value)
{
diff --git a/src/vm/dac/dacwks.targets b/src/vm/dac/dacwks.targets
index 82ab5439d5..6f6b9279a2 100644
--- a/src/vm/dac/dacwks.targets
+++ b/src/vm/dac/dacwks.targets
@@ -40,6 +40,7 @@
<CppCompile Include="$(ClrSrcDirectory)\vm\fptrstubs.cpp" />
<CppCompile Include="$(ClrSrcDirectory)\vm\frames.cpp" />
<CppCompile Include="$(ClrSrcDirectory)\vm\GCDecode.cpp" />
+ <CppCompile Include="$(ClrSrcDirectory)\vm\gcheaputilities.cpp" />
<CppCompile Include="$(ClrSrcDirectory)\vm\genericdict.cpp" />
<CppCompile Include="$(ClrSrcDirectory)\vm\generics.cpp" />
<CppCompile Include="$(ClrSrcDirectory)\vm\hash.cpp" />
diff --git a/src/vm/dataimage.cpp b/src/vm/dataimage.cpp
index 83ff0a4f9f..e90c7e6a0e 100644
--- a/src/vm/dataimage.cpp
+++ b/src/vm/dataimage.cpp
@@ -896,8 +896,10 @@ void DataImage::FixupRVAs()
FixupModuleRVAs();
FixupRvaStructure();
+#ifdef FEATURE_CER
if (m_module->m_pCerNgenRootTable != NULL)
m_module->m_pCerNgenRootTable->FixupRVAs(this);
+#endif
// Dev11 bug 181494 instrumentation
if (m_Fixups.GetCount() != m_iCurrentFixup) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
diff --git a/src/vm/debugdebugger.cpp b/src/vm/debugdebugger.cpp
index 9ea5427dfe..c8b76bf6dc 100644
--- a/src/vm/debugdebugger.cpp
+++ b/src/vm/debugdebugger.cpp
@@ -22,7 +22,7 @@
#include "frames.h"
#include "vars.hpp"
#include "field.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "jitinterface.h"
#include "debugdebugger.h"
#include "dbginterface.h"
@@ -1403,7 +1403,7 @@ FCIMPL4(INT32, DebuggerAssert::ShowDefaultAssertDialog,
}
msgText.Append(W("Description: "));
msgText.Append(message);
-
+
StackSString stackTraceText;
if (gc.strStackTrace != NULL) {
stackTraceText.Append(W("Stack Trace:\n"));
@@ -1414,25 +1414,33 @@ FCIMPL4(INT32, DebuggerAssert::ShowDefaultAssertDialog,
windowTitle.Set(W("Assert Failure"));
}
- // We're taking a string from managed code, and we can't be sure it doesn't have stuff like %s or \n in it.
- // So, pass a format string of %s and pass the text as a vararg to our message box method.
- // Also, varargs and StackSString don't mix. Convert to string first.
- const WCHAR* msgTextAsUnicode = msgText.GetUnicode();
- result = EEMessageBoxNonLocalizedNonFatal(W("%s"), windowTitle, stackTraceText, MB_ABORTRETRYIGNORE | MB_ICONEXCLAMATION, msgTextAsUnicode);
-
- // map the user's choice to the values recognized by
- // the System.Diagnostics.Assert package
- if (result == IDRETRY)
- {
- result = FailDebug;
- }
- else if (result == IDIGNORE)
+ if (NoGuiOnAssert())
{
- result = FailIgnore;
+ fwprintf(stderr, W("%s\n%s\n%s\n"), windowTitle.GetUnicode(), msgText.GetUnicode(), stackTraceText.GetUnicode());
+ result = FailTerminate;
}
else
{
- result = FailTerminate;
+ // We're taking a string from managed code, and we can't be sure it doesn't have stuff like %s or \n in it.
+ // So, pass a format string of %s and pass the text as a vararg to our message box method.
+ // Also, varargs and StackSString don't mix. Convert to string first.
+ const WCHAR* msgTextAsUnicode = msgText.GetUnicode();
+ result = EEMessageBoxNonLocalizedNonFatal(W("%s"), windowTitle, stackTraceText, MB_ABORTRETRYIGNORE | MB_ICONEXCLAMATION, msgTextAsUnicode);
+
+ // map the user's choice to the values recognized by
+ // the System.Diagnostics.Assert package
+ if (result == IDRETRY)
+ {
+ result = FailDebug;
+ }
+ else if (result == IDIGNORE)
+ {
+ result = FailIgnore;
+ }
+ else
+ {
+ result = FailTerminate;
+ }
}
HELPER_METHOD_FRAME_END();
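
Both branches keep the defense spelled out in the comment: text that originated in managed code is never used as a printf-style format string. The same rule in a self-contained form:

    #include <cstdio>

    void EchoUserText(const char* userText)
    {
        // Wrong: "%s" or "%n" inside userText would be treated as directives.
        // printf(userText);

        // Right: constant format string; the untrusted text is only ever data.
        printf("%s\n", userText);
    }
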
diff --git a/src/vm/debughelp.cpp b/src/vm/debughelp.cpp
index df769455aa..abe45d5da0 100644
--- a/src/vm/debughelp.cpp
+++ b/src/vm/debughelp.cpp
@@ -6,9 +6,9 @@
#include "common.h"
/*******************************************************************/
-/* The folowing routines used to exist in all builds so they could called from the
+/* The following routines used to exist in all builds so they could be called from the
* debugger before we had strike.
- * Now most of them are only inclued in debug builds for diagnostics purposes.
+ * Now most of them are only included in debug builds for diagnostic purposes.
*/
/*******************************************************************/
@@ -24,6 +24,12 @@ BOOL isMemoryReadable(const TADDR start, unsigned len)
}
CONTRACTL_END;
+#if !defined(DACCESS_COMPILE) && defined(FEATURE_PAL)
+
+ return PAL_ProbeMemory((PVOID)start, len, FALSE);
+
+#else // !DACCESS_COMPILE && FEATURE_PAL
+
//
// To accomplish this in a no-throw way, we have to touch each and every page
// and see if it is in memory or not.
@@ -87,6 +93,7 @@ BOOL isMemoryReadable(const TADDR start, unsigned len)
}
return 1;
+#endif // !DACCESS_COMPILE && FEATURE_PAL
}
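
On FEATURE_PAL builds the page-touching loop is replaced by a single PAL_ProbeMemory call; the retained Windows path still walks the range page by page, as the comment says. A self-contained sketch of that page-walking technique on Win32 (an illustration of the idea, not the code elided from this hunk):

    #include <windows.h>
    #include <cstdint>

    // Probe whether [start, start+len) is readable without throwing: ask the
    // OS about the state and protection of each page in the range.
    static bool IsRangeReadable(const void* start, size_t len)
    {
        SYSTEM_INFO si;
        GetSystemInfo(&si);
        const uintptr_t page = si.dwPageSize;

        uintptr_t p   = reinterpret_cast<uintptr_t>(start) & ~(page - 1);
        uintptr_t end = reinterpret_cast<uintptr_t>(start) + len;
        for (; p < end; p += page)
        {
            MEMORY_BASIC_INFORMATION mbi;
            if (VirtualQuery(reinterpret_cast<LPCVOID>(p), &mbi, sizeof(mbi)) == 0)
                return false;
            if (mbi.State != MEM_COMMIT ||
                (mbi.Protect & (PAGE_NOACCESS | PAGE_GUARD)) != 0)
                return false; // uncommitted, inaccessible, or guard page
        }
        return true;
    }
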
@@ -202,7 +209,7 @@ void *DumpEnvironmentBlock(void)
return WszGetEnvironmentStrings();
}
-#if defined(_TARGET_X86_)
+#if defined(_TARGET_X86_) && !defined(FEATURE_PAL)
/*******************************************************************/
// Dump the SEH chain to stderr
void PrintSEHChain(void)
@@ -1198,12 +1205,12 @@ void DumpGCInfo(MethodDesc* method)
_ASSERTE(codeInfo.GetRelOffset() == 0);
ICodeManager* codeMan = codeInfo.GetCodeManager();
- GCInfoToken table = codeInfo.GetGCInfoToken();
+ GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
- unsigned methodSize = (unsigned)codeMan->GetFunctionSize(table);
+ unsigned methodSize = (unsigned)codeMan->GetFunctionSize(gcInfoToken);
- GCDump gcDump(table.Version);
- PTR_CBYTE gcInfo = PTR_CBYTE(table.Info);
+ GCDump gcDump(gcInfoToken.Version);
+ PTR_CBYTE gcInfo = PTR_CBYTE(gcInfoToken.Info);
gcDump.gcPrintf = printfToDbgOut;
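
The table -> gcInfoToken renames here (and in the eedbginterfaceimpl.cpp and eetwain.cpp hunks below) ride on a small abstraction: rather than passing a bare pointer to the GC info blob, callers now carry the blob and its encoding version together. The shape assumed by these call sites, inferred from the two fields they use:

    // Sketch: pair the encoded GC info with its format version so decoders
    // (GCDump, the code managers) no longer have to guess the layout.
    struct GCInfoToken
    {
        PTR_VOID Info;     // start of the encoded GC info for the method
        UINT32   Version;  // GC info format version the blob was written with
    };
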
diff --git a/src/vm/dllimport.cpp b/src/vm/dllimport.cpp
index f724169ebf..a3f7f30d86 100644
--- a/src/vm/dllimport.cpp
+++ b/src/vm/dllimport.cpp
@@ -1024,7 +1024,7 @@ public:
pcsUnmarshal->EmitRET();
}
- DWORD dwJitFlags = CORJIT_FLG_IL_STUB;
+ CORJIT_FLAGS jitFlags(CORJIT_FLAGS::CORJIT_FLAG_IL_STUB);
if (m_slIL.HasInteropParamExceptionInfo())
{
@@ -1049,7 +1049,7 @@ public:
else
{
// All other IL stubs will need to use the secret parameter.
- dwJitFlags |= CORJIT_FLG_PUBLISH_SECRET_PARAM;
+ jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_PUBLISH_SECRET_PARAM);
}
if (SF_IsReverseStub(m_dwStubFlags))
@@ -1114,7 +1114,7 @@ public:
m_slIL.GenerateCode(pbBuffer, cbCode);
m_slIL.GetLocalSig(pbLocalSig, cbSig);
- pResolver->SetJitFlags(dwJitFlags);
+ pResolver->SetJitFlags(jitFlags);
#ifdef LOGGING
LOG((LF_STUBS, LL_INFO1000, "---------------------------------------------------------------------\n"));
@@ -1153,7 +1153,7 @@ public:
LogILStubFlags(LF_STUBS, LL_INFO1000, m_dwStubFlags);
- m_slIL.LogILStub(dwJitFlags);
+ m_slIL.LogILStub(jitFlags);
}
LOG((LF_STUBS, LL_INFO1000, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"));
#endif // LOGGING
@@ -1170,7 +1170,7 @@ public:
pStubMD,
pbLocalSig,
cbSig,
- dwJitFlags,
+ jitFlags,
&convertToHRTryCatch,
&cleanupTryFinally,
maxStack,
@@ -1188,7 +1188,7 @@ public:
MethodDesc * pStubMD,
PCCOR_SIGNATURE pbLocalSig,
DWORD cbSig,
- DWORD dwJitFlags,
+ CORJIT_FLAGS jitFlags,
ILStubEHClause * pConvertToHRTryCatchBounds,
ILStubEHClause * pCleanupTryFinallyBounds,
DWORD maxStack,
@@ -1256,7 +1256,7 @@ public:
strILStubCode.AppendPrintf(W(".maxstack %d \n"), maxStack);
strILStubCode.AppendPrintf(W(".locals %s\n"), strLocalSig.GetUnicode());
- m_slIL.LogILStub(dwJitFlags, &strILStubCode);
+ m_slIL.LogILStub(jitFlags, &strILStubCode);
if (pConvertToHRTryCatchBounds->cbTryLength != 0 && pConvertToHRTryCatchBounds->cbHandlerLength != 0)
{
@@ -3201,7 +3201,7 @@ void PInvokeStaticSigInfo::DllImportInit(MethodDesc* pMD, LPCUTF8 *ppLibName, LP
// initialize data members to defaults
PreInit(pMD);
- // System.Runtime.InteropServices.DLLImportAttribute
+ // System.Runtime.InteropServices.DllImportAttribute
IMDInternalImport *pInternalImport = pMD->GetMDImport();
CorPinvokeMap mappingFlags = pmMaxValue;
mdModuleRef modref = mdModuleRefNil;
@@ -4940,7 +4940,7 @@ void NDirect::PopulateNDirectMethodDesc(NDirectMethodDesc* pNMD, PInvokeStaticSi
// Currently only ManagedToNativeComInteropStubAttribute is supported.
// It returns NULL if no such attribute(s) can be found.
// But if the attribute is found and is invalid, or something went wrong in the looking up
-// process, a exception will be thrown. If everything goes well, you'll get the MethodDesc
+// process, an exception will be thrown. If everything goes well, you'll get the MethodDesc
// of the stub method
HRESULT FindPredefinedILStubMethod(MethodDesc *pTargetMD, DWORD dwStubFlags, MethodDesc **ppRetStubMD)
{
@@ -5947,8 +5947,8 @@ PCODE JitILStub(MethodDesc* pStubMD)
// A dynamically generated IL stub
//
- DWORD dwFlags = pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
- pCode = pStubMD->MakeJitWorker(NULL, dwFlags, 0);
+ CORJIT_FLAGS jitFlags = pStubMD->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
+ pCode = pStubMD->MakeJitWorker(NULL, jitFlags);
_ASSERTE(pCode == pStubMD->GetNativeCode());
}
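
This file's flag plumbing switches from a raw DWORD of CORJIT_FLG_* bits to the CORJIT_FLAGS value type whose Set/IsSet calls appear in the hunks above. A minimal self-contained sketch of the shape those call sites assume (the flag names come from the diff; the bit layout is an assumption):

    #include <cstdint>

    // Assumed sketch: a type-safe flag set replacing shared DWORD bitmasks.
    class CORJIT_FLAGS
    {
    public:
        enum CorJitFlag
        {
            CORJIT_FLAG_IL_STUB,
            CORJIT_FLAG_PUBLISH_SECRET_PARAM,
            CORJIT_FLAG_DEBUG_CODE,
            // remaining flags elided in this sketch
        };

        CORJIT_FLAGS() : corJitFlags(0) {}
        explicit CORJIT_FLAGS(CorJitFlag flag) : corJitFlags(0) { Set(flag); }

        void Set(CorJitFlag flag)         { corJitFlags |= 1ULL << flag; }
        bool IsSet(CorJitFlag flag) const { return (corJitFlags & (1ULL << flag)) != 0; }

    private:
        uint64_t corJitFlags; // one bit per flag
    };
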
diff --git a/src/vm/dllimportcallback.cpp b/src/vm/dllimportcallback.cpp
index 198a00795f..12613cb96b 100644
--- a/src/vm/dllimportcallback.cpp
+++ b/src/vm/dllimportcallback.cpp
@@ -164,7 +164,7 @@ EXTERN_C void STDCALL UM2MDoADCallBack(UMEntryThunk *pEntryThunk,
UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
}
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
EXTERN_C VOID __cdecl UMThunkStubRareDisable();
EXTERN_C Thread* __stdcall CreateThreadBlockThrow();
@@ -1010,7 +1010,7 @@ Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStat
return pcpusl->Link(pLoaderHeap);
}
-#else // _TARGET_X86_
+#else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
PCODE UMThunkMarshInfo::GetExecStubEntryPoint()
{
@@ -1019,7 +1019,7 @@ PCODE UMThunkMarshInfo::GetExecStubEntryPoint()
return GetEEFuncEntryPoint(UMThunkStub);
}
-#endif // _TARGET_X86_
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
UMEntryThunkCache::UMEntryThunkCache(AppDomain *pDomain) :
m_crst(CrstUMEntryThunkCache),
@@ -1302,7 +1302,7 @@ UMThunkMarshInfo::~UMThunkMarshInfo()
}
CONTRACTL_END;
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
if (m_pExecStub)
m_pExecStub->DecRef();
#endif
@@ -1320,7 +1320,9 @@ MethodDesc* UMThunkMarshInfo::GetILStubMethodDesc(MethodDesc* pInvokeMD, PInvoke
dwStubFlags |= NDIRECTSTUB_FL_REVERSE_INTEROP; // could be either delegate interop or not--that info is passed in from the caller
#if defined(DEBUGGING_SUPPORTED)
- if (GetDebuggerCompileFlags(pSigInfo->GetModule(), 0) & CORJIT_FLG_DEBUG_CODE)
+ // Combining the next two lines and eliminating jitDebuggerFlags leads to bad codegen in x86 Release builds using Visual C++ 19.00.24215.1.
+ CORJIT_FLAGS jitDebuggerFlags = GetDebuggerCompileFlags(pSigInfo->GetModule(), CORJIT_FLAGS());
+ if (jitDebuggerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE))
{
dwStubFlags |= NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL;
}
@@ -1362,7 +1364,7 @@ VOID UMThunkMarshInfo::LoadTimeInit(Signature sig, Module * pModule, MethodDesc
m_pModule = pModule;
m_sig = sig;
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
INDEBUG(m_cbRetPop = 0xcccc;)
#endif
}
@@ -1370,7 +1372,7 @@ VOID UMThunkMarshInfo::LoadTimeInit(Signature sig, Module * pModule, MethodDesc
#ifndef CROSSGEN_COMPILE
//----------------------------------------------------------
// This initializer finishes the init started by LoadTimeInit.
-// It does stub creation and can throw a exception.
+// It does stub creation and can throw an exception.
//
// It can safely be called multiple times and by concurrent
// threads.
@@ -1394,7 +1396,9 @@ VOID UMThunkMarshInfo::RunTimeInit()
DWORD dwStubFlags = NDIRECTSTUB_FL_NGENEDSTUB | NDIRECTSTUB_FL_REVERSE_INTEROP | NDIRECTSTUB_FL_DELEGATE;
#if defined(DEBUGGING_SUPPORTED)
- if (GetDebuggerCompileFlags(GetModule(), 0) & CORJIT_FLG_DEBUG_CODE)
+ // Combining the next two lines, and eliminating jitDebuggerFlags, leads to bad codegen in x86 Release builds using Visual C++ 19.00.24215.1.
+ CORJIT_FLAGS jitDebuggerFlags = GetDebuggerCompileFlags(GetModule(), CORJIT_FLAGS());
+ if (jitDebuggerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE))
{
dwStubFlags |= NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL;
}
@@ -1403,7 +1407,7 @@ VOID UMThunkMarshInfo::RunTimeInit()
pFinalILStub = GetStubForInteropMethod(pMD, dwStubFlags, &pStubMD);
}
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
PInvokeStaticSigInfo sigInfo;
if (pMD != NULL)
@@ -1454,7 +1458,7 @@ VOID UMThunkMarshInfo::RunTimeInit()
pFinalExecStub->DecRef();
}
-#else // _TARGET_X86_
+#else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
if (pFinalILStub == NULL)
{
@@ -1495,7 +1499,7 @@ VOID UMThunkMarshInfo::RunTimeInit()
//
m_cbActualArgSize = (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : pMD->SizeOfArgStack();
-#endif // _TARGET_X86_
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
// Must be the last thing we set!
InterlockedCompareExchangeT<PCODE>(&m_pILStub, pFinalILStub, (PCODE)1);
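
The interlocked exchange above is the publication point: per the "Must be the last thing we set!" comment, every other field is fully initialized before m_pILStub flips from its in-flight sentinel to the real stub address. A hedged sketch of the publish-once idiom (the sentinel protocol is inferred from the (PCODE)1 comparand; the surrounding setup is outside this hunk):

    // While initialization runs, m_pILStub holds the sentinel value 1. The
    // thread that still observes the sentinel installs its result; a loser's
    // stub is simply abandoned, which is safe because the work is idempotent.
    PCODE prior = InterlockedCompareExchangeT<PCODE>(&m_pILStub, pFinalILStub, (PCODE)1);
    if (prior != (PCODE)1)
    {
        // Another thread won the race; its stub is already visible to readers.
    }
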
diff --git a/src/vm/dllimportcallback.h b/src/vm/dllimportcallback.h
index 6de87d77a3..c2ed6d0039 100644
--- a/src/vm/dllimportcallback.h
+++ b/src/vm/dllimportcallback.h
@@ -110,7 +110,7 @@ public:
return m_pMD;
}
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
PCODE GetExecStubEntryPoint()
{
WRAPPER_NO_CONTRACT;
@@ -199,18 +199,18 @@ public:
return (UINT32)offsetof(UMThunkMarshInfo, m_pILStub);
}
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
// Compiles an unmanaged to managed thunk for the given signature. The thunk
// will call the stub or, if fNoStub == TRUE, the managed target directly.
Stub *CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStaticSigInfo* pSigInfo, MetaSig *pMetaSig, BOOL fNoStub);
-#endif // _TARGET_X86_
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
private:
PCODE m_pILStub; // IL stub for marshaling
// On x86, NULL for no-marshal signatures
// On non-x86, the managed entrypoint for no-delegate no-marshal signatures
UINT32 m_cbActualArgSize; // caches m_pSig.SizeOfFrameArgumentArray()
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
Stub* m_pExecStub; // UMEntryThunk jumps directly here
UINT16 m_cbRetPop; // stack bytes popped by callee (for UpdateRegDisplay)
UINT16 m_callConv; // unmanaged calling convention and flags (CorPinvokeMap)
@@ -248,7 +248,7 @@ public:
static UMEntryThunk* CreateUMEntryThunk();
static VOID FreeUMEntryThunk(UMEntryThunk* p);
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
// Compiles an unmanaged to managed thunk with the given calling convention adaptation.
// - psrcofsregs are stack offsets that should be loaded to argument registers (ECX, EDX)
// - psrcofs are stack offsets that should be repushed for the managed target
@@ -263,7 +263,7 @@ public:
UINT *psrcofsregs,
UINT *psrcofs,
UINT retbufofs);
-#endif // _TARGET_X86_
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
#ifndef DACCESS_COMPILE
VOID LoadTimeInit(PCODE pManagedTarget,
@@ -569,12 +569,12 @@ private:
AppDomain *m_pDomain;
};
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
//-------------------------------------------------------------------------
// One-time creation of special prestub to initialize UMEntryThunks.
//-------------------------------------------------------------------------
Stub *GenerateUMThunkPrestub();
-#endif
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
//-------------------------------------------------------------------------
// NExport stub
diff --git a/src/vm/domainfile.cpp b/src/vm/domainfile.cpp
index 2353712c9e..bfb69cdd48 100644
--- a/src/vm/domainfile.cpp
+++ b/src/vm/domainfile.cpp
@@ -4140,8 +4140,8 @@ void DomainAssembly::EnumStaticGCRefs(promote_func* fn, ScanContext* sc)
}
CONTRACT_END;
- _ASSERTE(GCHeap::IsGCInProgress() &&
- GCHeap::IsServerHeap() &&
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() &&
+ GCHeapUtilities::IsServerHeap() &&
IsGCSpecialThread());
DomainModuleIterator i = IterateModules(kModIterIncludeLoaded);
diff --git a/src/vm/dwreport.cpp b/src/vm/dwreport.cpp
index 77669b2f14..5ae4f84de2 100644
--- a/src/vm/dwreport.cpp
+++ b/src/vm/dwreport.cpp
@@ -3212,7 +3212,7 @@ FaultReportResult DoFaultReport( // Was Watson attempted, successful?
// thread under Coop mode, this will let the newly generated DoFaultReportCallBack
// thread trigger a deadlock. So in this case, we should directly abort the fault
// report to avoid the deadlock.
- ((IsGCThread() || pThread->PreemptiveGCDisabled()) && GCHeap::IsGCInProgress()) ||
+ ((IsGCThread() || pThread->PreemptiveGCDisabled()) && GCHeapUtilities::IsGCInProgress()) ||
FAILED(g_pDebugInterface->RequestFavor(DoFaultReportFavorWorker, pData)))
{
// If we can't initialize the debugger helper thread or we are running on the debugger helper
diff --git a/src/vm/ecalllist.h b/src/vm/ecalllist.h
index 8d4164e018..b110d0eea6 100644
--- a/src/vm/ecalllist.h
+++ b/src/vm/ecalllist.h
@@ -108,6 +108,8 @@ FCFuncStart(gDependentHandleFuncs)
FCFuncElement("nGetPrimary", DependentHandle::nGetPrimary)
FCFuncElement("nGetPrimaryAndSecondary", DependentHandle::nGetPrimaryAndSecondary)
FCFuncElement("nFree", DependentHandle::nFree)
+ FCFuncElement("nSetPrimary", DependentHandle::nSetPrimary)
+ FCFuncElement("nSetSecondary", DependentHandle::nSetSecondary)
FCFuncEnd()
#ifndef FEATURE_CORECLR
@@ -195,15 +197,6 @@ FCFuncStart(gTimeSpanFuncs)
FCFuncEnd()
#endif // !FEATURE_CORECLR
-#ifndef FEATURE_CORECLR // FCalls used by System.TimeZone
-FCFuncStart(gTimeZoneFuncs)
- FCFuncElement("nativeGetTimeZoneMinuteOffset", COMNlsInfo::nativeGetTimeZoneMinuteOffset)
- FCFuncElement("nativeGetStandardName", COMNlsInfo::nativeGetStandardName)
- FCFuncElement("nativeGetDaylightName", COMNlsInfo::nativeGetDaylightName)
- FCFuncElement("nativeGetDaylightChanges", COMNlsInfo::nativeGetDaylightChanges)
-FCFuncEnd()
-#endif // FEATURE_CORECLR
-
FCFuncStart(gObjectFuncs)
FCIntrinsic("GetType", ObjectNative::GetClass, CORINFO_INTRINSIC_Object_GetType)
FCFuncElement("MemberwiseClone", ObjectNative::Clone)
@@ -297,6 +290,7 @@ FCFuncStart(gEnvironmentFuncs)
FCFuncElement("GetResourceFromDefault", GetResourceFromDefault)
#endif // !FEATURE_CORECLR
FCFuncElement("GetCommandLineArgsNative", SystemNative::GetCommandLineArgs)
+ FCFuncElement("get_CurrentProcessorNumber", SystemNative::GetCurrentProcessorNumber)
#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORESYSTEM)
QCFuncElement("WinRTSupported", SystemNative::WinRTSupported)
@@ -650,9 +644,11 @@ FCFuncStart(gCustomAttributeEncodedArgument)
FCFuncElement("ParseAttributeArguments", Attribute::ParseAttributeArguments)
FCFuncEnd()
+#ifdef FEATURE_CAS_POLICY
FCFuncStart(gPseudoCustomAttribute)
FCFuncElement("_GetSecurityAttributes", COMCustomAttribute::GetSecurityAttributes)
FCFuncEnd()
+#endif
FCFuncStart(gCOMCustomAttributeFuncs)
FCFuncElement("_ParseAttributeUsageAttribute", COMCustomAttribute::ParseAttributeUsageAttribute)
@@ -1041,7 +1037,7 @@ FCFuncStart(gTypeNameBuilder)
QCFuncElement("Clear", TypeNameBuilder::_Clear)
FCFuncEnd()
-#ifndef FEATURE_CORECLR
+
FCFuncStart(gSafeTypeNameParserHandle)
QCFuncElement("_ReleaseTypeNameParser", TypeName::QReleaseTypeNameParser)
FCFuncEnd()
@@ -1053,7 +1049,6 @@ FCFuncStart(gTypeNameParser)
QCFuncElement("_GetModifiers", TypeName::QGetModifiers)
QCFuncElement("_GetAssemblyName", TypeName::QGetAssemblyName)
FCFuncEnd()
-#endif //!FEATURE_CORECLR
#ifdef FEATURE_CAS_POLICY
FCFuncStart(gPEFileFuncs)
@@ -1146,6 +1141,7 @@ FCFuncEnd()
FCFuncStart(gAssemblyLoadContextFuncs)
QCFuncElement("InitializeAssemblyLoadContext", AssemblyNative::InitializeAssemblyLoadContext)
QCFuncElement("LoadFromPath", AssemblyNative::LoadFromPath)
+ QCFuncElement("GetLoadedAssembliesInternal", AssemblyNative::GetLoadedAssembliesInternal)
QCFuncElement("InternalLoadUnmanagedDllFromPath", AssemblyNative::InternalLoadUnmanagedDllFromPath)
QCFuncElement("OverrideDefaultAssemblyLoadContextForCurrentDomain", AssemblyNative::OverrideDefaultAssemblyLoadContextForCurrentDomain)
QCFuncElement("CanUseAppPathAssemblyLoadContextInCurrentDomain", AssemblyNative::CanUseAppPathAssemblyLoadContextInCurrentDomain)
@@ -1165,9 +1161,9 @@ FCFuncStart(gAssemblyNameFuncs)
FCFuncElement("nGetPublicKeyToken", AssemblyNameNative::GetPublicKeyToken)
#ifndef FEATURE_CORECLR
FCFuncElement("EscapeCodeBase", AssemblyNameNative::EscapeCodeBase)
+#endif // !FEATURE_CORECLR
FCFuncElement("ReferenceMatchesDefinitionInternal", AssemblyNameNative::ReferenceMatchesDefinition)
FCFuncElement("nGetFileInformation", AssemblyNameNative::GetFileInformation)
-#endif // !FEATURE_CORECLR
FCFuncEnd()
FCFuncStart(gLoaderAllocatorFuncs)
@@ -1251,32 +1247,63 @@ FCFuncStart(gMathFuncs)
FCIntrinsic("Tanh", COMDouble::Tanh, CORINFO_INTRINSIC_Tanh)
FCFuncEnd()
+FCFuncStart(gMathFFuncs)
+ FCIntrinsic("Acos", COMSingle::Acos, CORINFO_INTRINSIC_Acos)
+ FCIntrinsic("Asin", COMSingle::Asin, CORINFO_INTRINSIC_Asin)
+ FCIntrinsic("Atan", COMSingle::Atan, CORINFO_INTRINSIC_Atan)
+ FCIntrinsic("Atan2", COMSingle::Atan2, CORINFO_INTRINSIC_Atan2)
+ FCIntrinsic("Ceiling", COMSingle::Ceil, CORINFO_INTRINSIC_Ceiling)
+ FCIntrinsic("Cos", COMSingle::Cos, CORINFO_INTRINSIC_Cos)
+ FCIntrinsic("Cosh", COMSingle::Cosh, CORINFO_INTRINSIC_Cosh)
+ FCIntrinsic("Exp", COMSingle::Exp, CORINFO_INTRINSIC_Exp)
+ FCIntrinsic("Floor", COMSingle::Floor, CORINFO_INTRINSIC_Floor)
+ FCFuncElement("Log", COMSingle::Log)
+ FCIntrinsic("Log10", COMSingle::Log10, CORINFO_INTRINSIC_Log10)
+ FCIntrinsic("Pow", COMSingle::Pow, CORINFO_INTRINSIC_Pow)
+ FCIntrinsic("Round", COMSingle::Round, CORINFO_INTRINSIC_Round)
+ FCIntrinsic("Sin", COMSingle::Sin, CORINFO_INTRINSIC_Sin)
+ FCIntrinsic("Sinh", COMSingle::Sinh, CORINFO_INTRINSIC_Sinh)
+ FCFuncElement("SplitFractionSingle", COMSingle::ModF)
+ FCIntrinsic("Sqrt", COMSingle::Sqrt, CORINFO_INTRINSIC_Sqrt)
+ FCIntrinsic("Tan", COMSingle::Tan, CORINFO_INTRINSIC_Tan)
+ FCIntrinsic("Tanh", COMSingle::Tanh, CORINFO_INTRINSIC_Tanh)
+FCFuncEnd()
+
+FCFuncStart(gRuntimeThreadFuncs)
+ FCFuncElement("get_IsAlive", ThreadNative::IsAlive)
+ FCFuncElement("IsBackgroundNative", ThreadNative::IsBackground)
+ FCFuncElement("SetBackgroundNative", ThreadNative::SetBackground)
+ FCFuncElement("get_IsThreadPoolThread", ThreadNative::IsThreadpoolThread)
+ FCFuncElement("GetPriorityNative", ThreadNative::GetPriority)
+ FCFuncElement("SetPriorityNative", ThreadNative::SetPriority)
+ FCFuncElement("GetThreadStateNative", ThreadNative::GetThreadState)
+#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
+ FCFuncElement("GetApartmentStateNative", ThreadNative::GetApartmentState)
+ FCFuncElement("SetApartmentStateNative", ThreadNative::SetApartmentState)
+#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
+#ifdef FEATURE_COMINTEROP
+ FCFuncElement("DisableComObjectEagerCleanup", ThreadNative::DisableComObjectEagerCleanup)
+#endif // FEATURE_COMINTEROP
+ FCFuncElement("InterruptInternal", ThreadNative::Interrupt)
+ FCFuncElement("JoinInternal", ThreadNative::Join)
+FCFuncEnd()
+
FCFuncStart(gThreadFuncs)
FCDynamic("InternalGetCurrentThread", CORINFO_INTRINSIC_Illegal, ECall::InternalGetCurrentThread)
FCFuncElement("StartInternal", ThreadNative::Start)
#ifndef FEATURE_CORECLR
FCFuncElement("SuspendInternal", ThreadNative::Suspend)
FCFuncElement("ResumeInternal", ThreadNative::Resume)
- FCFuncElement("InterruptInternal", ThreadNative::Interrupt)
-#endif
- FCFuncElement("get_IsAlive", ThreadNative::IsAlive)
- FCFuncElement("GetThreadStateNative", ThreadNative::GetThreadState)
-#ifndef FEATURE_CORECLR
- FCFuncElement("GetPriorityNative", ThreadNative::GetPriority)
- FCFuncElement("SetPriorityNative", ThreadNative::SetPriority)
#endif
#ifdef FEATURE_LEAK_CULTURE_INFO
FCFuncElement("nativeGetSafeCulture", ThreadNative::nativeGetSafeCulture)
#else
QCFuncElement("nativeInitCultureAccessors", ThreadNative::nativeInitCultureAccessors)
#endif
- FCFuncElement("JoinInternal", ThreadNative::Join)
#undef Sleep
FCFuncElement("SleepInternal", ThreadNative::Sleep)
#define Sleep(a) Dont_Use_Sleep(a)
FCFuncElement("SetStart", ThreadNative::SetStart)
- FCFuncElement("SetBackgroundNative", ThreadNative::SetBackground)
- FCFuncElement("IsBackgroundNative", ThreadNative::IsBackground)
#ifdef FEATURE_REMOTING
FCFuncElement("GetContextInternal", ThreadNative::GetContextFromContextID)
#endif
@@ -1303,23 +1330,15 @@ FCFuncStart(gThreadFuncs)
#ifndef FEATURE_CORECLR
FCFuncElement("ResetAbortNative", ThreadNative::ResetAbort)
#endif // FEATURE_CORECLR
- FCFuncElement("get_IsThreadPoolThread", ThreadNative::IsThreadpoolThread)
FCFuncElement("SpinWaitInternal", ThreadNative::SpinWait)
QCFuncElement("YieldInternal", ThreadNative::YieldThread)
FCIntrinsic("GetCurrentThreadNative", ThreadNative::GetCurrentThread, CORINFO_INTRINSIC_GetCurrentManagedThread)
FCIntrinsic("get_ManagedThreadId", ThreadNative::GetManagedThreadId, CORINFO_INTRINSIC_GetManagedThreadId)
FCFuncElement("InternalFinalize", ThreadNative::Finalize)
-#if defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
- FCFuncElement("DisableComObjectEagerCleanup", ThreadNative::DisableComObjectEagerCleanup)
-#endif // defined(FEATURE_COMINTEROP) && !defined(FEATURE_CORECLR)
#ifdef FEATURE_LEAK_CULTURE_INFO
FCFuncElement("nativeSetThreadUILocale", ThreadNative::SetThreadUILocale)
#endif
#ifdef FEATURE_COMINTEROP_APARTMENT_SUPPORT
-#ifndef FEATURE_CORECLR
- FCFuncElement("SetApartmentStateNative", ThreadNative::SetApartmentState)
- FCFuncElement("GetApartmentStateNative", ThreadNative::GetApartmentState)
-#endif // FEATURE_CORECLR
FCFuncElement("StartupSetApartmentStateInternal", ThreadNative::StartupSetApartmentState)
#endif // FEATURE_COMINTEROP_APARTMENT_SUPPORT
FCIntrinsic("MemoryBarrier", ThreadNative::FCMemoryBarrier, CORINFO_INTRINSIC_MemoryBarrier)
@@ -1331,10 +1350,8 @@ FCFuncStart(gThreadFuncs)
FCFuncEnd()
FCFuncStart(gThreadPoolFuncs)
-#ifndef FEATURE_CORECLR
FCFuncElement("PostQueuedCompletionStatus", ThreadPoolNative::CorPostQueuedCompletionStatus)
FCFuncElement("GetAvailableThreadsNative", ThreadPoolNative::CorGetAvailableThreads)
-#endif // FEATURE_CORECLR
FCFuncElement("SetMinThreadsNative", ThreadPoolNative::CorSetMinThreads)
FCFuncElement("GetMinThreadsNative", ThreadPoolNative::CorGetMinThreads)
FCFuncElement("RegisterWaitForSingleObjectNative", ThreadPoolNative::CorRegisterWaitForSingleObject)
@@ -1444,8 +1461,8 @@ FCFuncStart(gCompareInfoFuncs)
QCFuncElement("InternalGetSortKey", COMNlsInfo::InternalGetSortKey)
#ifndef FEATURE_CORECLR
QCFuncElement("InternalGetSortVersion", COMNlsInfo::InternalGetSortVersion)
- QCFuncElement("InternalGetNlsVersionEx", COMNlsInfo::InternalGetNlsVersionEx)
#endif
+ QCFuncElement("InternalGetNlsVersionEx", COMNlsInfo::InternalGetNlsVersionEx)
FCFuncEnd()
FCFuncStart(gEncodingTableFuncs)
@@ -1567,11 +1584,9 @@ FCFuncStart(gGCInterfaceFuncs)
FCFuncElement("_GetAllocatedBytesForCurrentThread", GCInterface::GetAllocatedBytesForCurrentThread)
FCFuncEnd()
-#ifndef FEATURE_CORECLR
FCFuncStart(gMemoryFailPointFuncs)
FCFuncElement("GetMemorySettings", COMMemoryFailPoint::GetMemorySettings)
FCFuncEnd()
-#endif // FEATURE_CORECLR
FCFuncStart(gInteropMarshalFuncs)
FCFuncElement("GetLastWin32Error", MarshalNative::GetLastWin32Error)
@@ -1582,10 +1597,10 @@ FCFuncStart(gInteropMarshalFuncs)
FCFuncElement("DestroyStructure", MarshalNative::DestroyStructure)
FCFuncElement("UnsafeAddrOfPinnedArrayElement", MarshalNative::FCUnsafeAddrOfPinnedArrayElement)
FCFuncElement("GetExceptionCode", ExceptionNative::GetExceptionCode)
+ QCFuncElement("GetHINSTANCE", COMModule::GetHINSTANCE)
#ifndef FEATURE_CORECLR
QCFuncElement("InternalNumParamBytes", MarshalNative::NumParamBytes)
FCFuncElement("GetExceptionPointers", ExceptionNative::GetExceptionPointers)
- QCFuncElement("GetHINSTANCE", COMModule::GetHINSTANCE)
FCFuncElement("GetUnmanagedThunkForManagedMethodPtr", MarshalNative::GetUnmanagedThunkForManagedMethodPtr)
FCFuncElement("GetManagedThunkForUnmanagedMethodPtr", MarshalNative::GetManagedThunkForUnmanagedMethodPtr)
FCFuncElement("InternalGetThreadFromFiberCookie", MarshalNative::GetThreadFromFiberCookie)
@@ -1630,11 +1645,12 @@ FCFuncStart(gInteropMarshalFuncs)
FCFuncElement("InternalReleaseComObject", MarshalNative::ReleaseComObject)
FCFuncElement("Release", MarshalNative::Release)
FCFuncElement("InitializeWrapperForWinRT", MarshalNative::InitializeWrapperForWinRT)
+ FCFuncElement("GetTypedObjectForIUnknown", MarshalNative::GetTypedObjectForIUnknown)
+ FCFuncElement("ChangeWrapperHandleStrength", MarshalNative::ChangeWrapperHandleStrength)
+ FCFuncElement("CleanupUnusedObjectsInCurrentContext", MarshalNative::CleanupUnusedObjectsInCurrentContext)
#ifndef FEATURE_CORECLR
FCFuncElement("GetLoadedTypeForGUID", MarshalNative::GetLoadedTypeForGUID)
FCFuncElement("GetITypeInfoForType", MarshalNative::GetITypeInfoForType)
- FCFuncElement("GetTypedObjectForIUnknown", MarshalNative::GetTypedObjectForIUnknown)
- FCFuncElement("CleanupUnusedObjectsInCurrentContext", MarshalNative::CleanupUnusedObjectsInCurrentContext)
FCFuncElement("IsTypeVisibleFromCom", MarshalNative::IsTypeVisibleFromCom)
FCFuncElement("FCallGenerateGuidForType", MarshalNative::DoGenerateGuidForType)
FCFuncElement("FCallGetTypeLibGuid", MarshalNative::DoGetTypeLibGuid)
@@ -1648,7 +1664,6 @@ FCFuncStart(gInteropMarshalFuncs)
FCFuncElement("InternalGetComSlotForMethodInfo", MarshalNative::GetComSlotForMethodInfo)
FCFuncElement("InternalSwitchCCW", MarshalNative::SwitchCCW)
FCFuncElement("InternalWrapIUnknownWithComObject", MarshalNative::WrapIUnknownWithComObject)
- FCFuncElement("ChangeWrapperHandleStrength", MarshalNative::ChangeWrapperHandleStrength)
QCFuncElement("_GetInspectableIids", MarshalNative::GetInspectableIIDs)
QCFuncElement("_GetCachedWinRTTypes", MarshalNative::GetCachedWinRTTypes)
QCFuncElement("_GetCachedWinRTTypeByIid", MarshalNative::GetCachedWinRTTypeByIID)
@@ -1829,8 +1844,8 @@ FCFuncStart(gCompilerFuncs)
FCFuncElement("GetObjectValue", ObjectNative::GetObjectValue)
FCIntrinsic("InitializeArray", ArrayNative::InitializeArray, CORINFO_INTRINSIC_InitializeArray)
FCFuncElement("_RunClassConstructor", ReflectionInvocation::RunClassConstructor)
-#ifndef FEATURE_CORECLR
FCFuncElement("_RunModuleConstructor", ReflectionInvocation::RunModuleConstructor)
+#ifndef FEATURE_CORECLR
FCFuncElement("_PrepareMethod", ReflectionInvocation::PrepareMethod)
#endif // !FEATURE_CORECLR
QCFuncElement("_CompileMethod", ReflectionInvocation::CompileMethod)
@@ -1843,14 +1858,12 @@ FCFuncStart(gCompilerFuncs)
FCFuncElement("GetHashCode", ObjectNative::GetHashCode)
FCFuncElement("Equals", ObjectNative::Equals)
FCFuncElement("EnsureSufficientExecutionStack", ReflectionInvocation::EnsureSufficientExecutionStack)
-#ifdef FEATURE_CORECLR
FCFuncElement("TryEnsureSufficientExecutionStack", ReflectionInvocation::TryEnsureSufficientExecutionStack)
-#endif // FEATURE_CORECLR
FCFuncEnd()
FCFuncStart(gContextSynchronizationFuncs)
#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
- FCFuncElement("WaitHelper", SynchronizationContextNative::WaitHelper)
+ FCFuncElement("WaitHelperNative", SynchronizationContextNative::WaitHelper)
#endif // #ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
#ifdef FEATURE_APPX
QCFuncElement("GetWinRTDispatcherForCurrentThread", SynchronizationContextNative::GetWinRTDispatcherForCurrentThread)
@@ -2191,9 +2204,6 @@ FCClassElement("CultureData", "System.Globalization", gCultureDataFuncs)
FCClassElement("CultureInfo", "System.Globalization", gCultureInfoFuncs)
#endif
FCClassElement("Currency", "System", gCurrencyFuncs)
-#ifndef FEATURE_CORECLR
-FCClassElement("CurrentSystemTimeZone", "System", gTimeZoneFuncs)
-#endif // FEATURE_CORECLR
FCClassElement("CustomAttribute", "System.Reflection", gCOMCustomAttributeFuncs)
FCClassElement("CustomAttributeEncodedArgument", "System.Reflection", gCustomAttributeEncodedArgument)
FCClassElement("DateMarshaler", "System.StubHelpers", gDateMarshalerFuncs)
@@ -2255,12 +2265,11 @@ FCClassElement("Marshal", "System.Runtime.InteropServices", gInteropMarshalFuncs
FCClassElement("MarshalByRefObject", "System", gMarshalByRefFuncs)
#endif
FCClassElement("Math", "System", gMathFuncs)
+FCClassElement("MathF", "System", gMathFFuncs)
#ifdef MDA_SUPPORTED
FCClassElement("Mda", "System", gMda)
#endif
-#ifndef FEATURE_CORECLR
FCClassElement("MemoryFailPoint", "System.Runtime", gMemoryFailPointFuncs)
-#endif // FEATURE_CORECLR
#ifdef FEATURE_REMOTING
FCClassElement("Message", "System.Runtime.Remoting.Messaging", gMessageFuncs)
#endif
@@ -2307,7 +2316,9 @@ FCClassElement("PolicyManager", "System.Security", gPolicyManagerFuncs)
FCClassElement("ProfileOptimization", "System.Runtime", gProfileOptimizationFuncs)
#endif // defined(FEATURE_MULTICOREJIT) && !defined(FEATURE_CORECLR)
+#ifdef FEATURE_CAS_POLICY
FCClassElement("PseudoCustomAttribute", "System.Reflection", gPseudoCustomAttribute)
+#endif
#ifdef FEATURE_CORECLR
FCClassElement("PunkSafeHandle", "System.Reflection.Emit", gSymWrapperCodePunkSafeHandleFuncs)
#endif
@@ -2346,6 +2357,7 @@ FCClassElement("RuntimeFieldHandle", "System", gCOMFieldHandleNewFuncs)
FCClassElement("RuntimeHelpers", "System.Runtime.CompilerServices", gCompilerFuncs)
FCClassElement("RuntimeMethodHandle", "System", gRuntimeMethodHandle)
FCClassElement("RuntimeModule", "System.Reflection", gCOMModuleFuncs)
+FCClassElement("RuntimeThread", "Internal.Runtime.Augments", gRuntimeThreadFuncs)
FCClassElement("RuntimeType", "System", gSystem_RuntimeType)
FCClassElement("RuntimeTypeHandle", "System", gCOMTypeHandleFuncs)
FCClassElement("SafeBuffer", "System.Runtime.InteropServices", gSafeBufferFuncs)
@@ -2371,9 +2383,8 @@ FCClassElement("SafePEFileHandle", "Microsoft.Win32.SafeHandles", gPEFileFuncs)
#ifdef FEATURE_CRYPTO
FCClassElement("SafeProvHandle", "System.Security.Cryptography", gSafeProvHandleFuncs)
#endif
-#ifndef FEATURE_CORECLR
FCClassElement("SafeTypeNameParserHandle", "System", gSafeTypeNameParserHandle)
-#endif //!FEATURE_CORECLR
+
#if defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
FCClassElement("SecurityContext", "System.Security", gCOMSecurityContextFuncs)
#endif // defined(FEATURE_IMPERSONATION) || defined(FEATURE_COMPRESSEDSTACK)
@@ -2414,9 +2425,7 @@ FCClassElement("TypeLibConverter", "System.Runtime.InteropServices", gTypeLibCon
#endif
FCClassElement("TypeLoadException", "System", gTypeLoadExceptionFuncs)
FCClassElement("TypeNameBuilder", "System.Reflection.Emit", gTypeNameBuilder)
-#ifndef FEATURE_CORECLR
FCClassElement("TypeNameParser", "System", gTypeNameParser)
-#endif //!FEATURE_CORECLR
FCClassElement("TypedReference", "System", gTypedReferenceFuncs)
FCClassElement("URLString", "System.Security.Util", gCOMUrlStringFuncs)
#ifdef FEATURE_COMINTEROP
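
For readers new to this table: each FCClassElement binds a managed type to a function list, and each FCFuncElement/FCIntrinsic/QCFuncElement within a list binds one InternalCall method on that type to its native implementation (FCIntrinsic additionally names a CORINFO_INTRINSIC_* ID so the JIT may expand the call inline). A hedged illustration of what one of the new gMathFFuncs entries resolves to (the real COMSingle::Sqrt lives in classlibnative and may differ in detail):

    #include <cmath>

    // Illustrative FCall body for the System.MathF "Sqrt" binding above.
    FCIMPL1_V(float, COMSingle::Sqrt, float x)
        FCALL_CONTRACT;
        return sqrtf(x);
    FCIMPLEND
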
diff --git a/src/vm/eedbginterfaceimpl.cpp b/src/vm/eedbginterfaceimpl.cpp
index 93decc9b0d..ff63d846e7 100644
--- a/src/vm/eedbginterfaceimpl.cpp
+++ b/src/vm/eedbginterfaceimpl.cpp
@@ -501,9 +501,9 @@ BOOL EEDbgInterfaceImpl::IsInPrologOrEpilog(const BYTE *address,
if (codeInfo.IsValid())
{
- LPVOID methodInfo = codeInfo.GetGCInfo();
+ GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
- if (codeInfo.GetCodeManager()->IsInPrologOrEpilog(codeInfo.GetRelOffset(), methodInfo, prologSize))
+ if (codeInfo.GetCodeManager()->IsInPrologOrEpilog(codeInfo.GetRelOffset(), gcInfoToken, prologSize))
{
return TRUE;
}
diff --git a/src/vm/eepolicy.cpp b/src/vm/eepolicy.cpp
index 8c3f2ec625..236f5afd6e 100644
--- a/src/vm/eepolicy.cpp
+++ b/src/vm/eepolicy.cpp
@@ -1368,7 +1368,7 @@ void DisplayStackOverflowException()
LIMITED_METHOD_CONTRACT;
PrintToStdErrA("\n");
- PrintToStdErrA("Process is terminated due to StackOverflowException.\n");
+ PrintToStdErrA("Process is terminating due to StackOverflowException.\n");
}
void DECLSPEC_NORETURN EEPolicy::HandleFatalStackOverflow(EXCEPTION_POINTERS *pExceptionInfo, BOOL fSkipDebugger)
diff --git a/src/vm/eetoprofinterfaceimpl.cpp b/src/vm/eetoprofinterfaceimpl.cpp
index 2ec3812159..1ceed3d81b 100644
--- a/src/vm/eetoprofinterfaceimpl.cpp
+++ b/src/vm/eetoprofinterfaceimpl.cpp
@@ -414,6 +414,7 @@ EEToProfInterfaceImpl::EEToProfInterfaceImpl() :
m_pCallback5(NULL),
m_pCallback6(NULL),
m_pCallback7(NULL),
+ m_pCallback8(NULL),
m_hmodProfilerDLL(NULL),
m_fLoadedViaAttach(FALSE),
m_pProfToEE(NULL),
@@ -664,21 +665,25 @@ HRESULT EEToProfInterfaceImpl::CreateProfiler(
m_hmodProfilerDLL = hmodProfilerDLL.Extract();
hmodProfilerDLL = NULL;
- // The profiler may optionally support ICorProfilerCallback3,4,5,6,7. Let's check.
+ // The profiler may optionally support ICorProfilerCallback3,4,5,6,7,8. Let's check.
- ReleaseHolder<ICorProfilerCallback7> pCallback7;
+ ReleaseHolder<ICorProfilerCallback8> pCallback8;
hr = m_pCallback2->QueryInterface(
- IID_ICorProfilerCallback7,
- (LPVOID *)&pCallback7);
- if (SUCCEEDED(hr) && (pCallback7 != NULL))
+ IID_ICorProfilerCallback8,
+ (LPVOID *)&pCallback8);
+ if (SUCCEEDED(hr) && (pCallback8 != NULL))
{
// Nifty. Transfer ownership to this class
- _ASSERTE(m_pCallback7 == NULL);
- m_pCallback7 = pCallback7.Extract();
- pCallback7 = NULL;
+ _ASSERTE(m_pCallback8 == NULL);
+ m_pCallback8 = pCallback8.Extract();
+ pCallback8 = NULL;
- // And while we're at it, we must now also have an ICorProfilerCallback3,4,5,6
+ // And while we're at it, we must now also have an ICorProfilerCallback3,4,5,6,7
// due to inheritance relationship of the interfaces
+ _ASSERTE(m_pCallback7 == NULL);
+ m_pCallback7 = static_cast<ICorProfilerCallback7 *>(m_pCallback8);
+ m_pCallback7->AddRef();
+
_ASSERTE(m_pCallback6 == NULL);
m_pCallback6 = static_cast<ICorProfilerCallback6 *>(m_pCallback7);
m_pCallback6->AddRef();
@@ -696,6 +701,40 @@ HRESULT EEToProfInterfaceImpl::CreateProfiler(
m_pCallback3->AddRef();
}
+ if (m_pCallback7 == NULL)
+ {
+ ReleaseHolder<ICorProfilerCallback7> pCallback7;
+ hr = m_pCallback2->QueryInterface(
+ IID_ICorProfilerCallback7,
+ (LPVOID *)&pCallback7);
+ if (SUCCEEDED(hr) && (pCallback7 != NULL))
+ {
+ // Nifty. Transfer ownership to this class
+ _ASSERTE(m_pCallback7 == NULL);
+ m_pCallback7 = pCallback7.Extract();
+ pCallback7 = NULL;
+
+ // And while we're at it, we must now also have an ICorProfilerCallback3,4,5,6
+ // due to inheritance relationship of the interfaces
+
+ _ASSERTE(m_pCallback6 == NULL);
+ m_pCallback6 = static_cast<ICorProfilerCallback6 *>(m_pCallback7);
+ m_pCallback6->AddRef();
+
+ _ASSERTE(m_pCallback5 == NULL);
+ m_pCallback5 = static_cast<ICorProfilerCallback5 *>(m_pCallback6);
+ m_pCallback5->AddRef();
+
+ _ASSERTE(m_pCallback4 == NULL);
+ m_pCallback4 = static_cast<ICorProfilerCallback4 *>(m_pCallback5);
+ m_pCallback4->AddRef();
+
+ _ASSERTE(m_pCallback3 == NULL);
+ m_pCallback3 = static_cast<ICorProfilerCallback3 *>(m_pCallback4);
+ m_pCallback3->AddRef();
+ }
+ }
+
if (m_pCallback6 == NULL)
{
ReleaseHolder<ICorProfilerCallback6> pCallback6;
@@ -873,6 +912,13 @@ EEToProfInterfaceImpl::~EEToProfInterfaceImpl()
m_pCallback7 = NULL;
}
+ if (m_pCallback8 != NULL)
+ {
+ REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
+ m_pCallback8->Release();
+ m_pCallback8 = NULL;
+ }
+
// Only unload the V4 profiler if this is not part of shutdown. This protects
// Whidbey profilers that aren't used to being FreeLibrary'd.
if (fIsV4Profiler && !g_fEEShutDown)
@@ -2294,7 +2340,7 @@ HRESULT EEToProfInterfaceImpl::SetEventMask(DWORD dwEventMask, DWORD dwEventMask
// in this function
if (g_profControlBlock.curProfStatus.Get() == kProfStatusInitializingForAttachLoad)
{
- if (GCHeap::GetGCHeap()->IsConcurrentGCEnabled())
+ if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled())
{
// We only allow turning off concurrent GC in the profiler attach thread inside
// InitializeForAttach, otherwise we would be vulnerable to weird races such as
@@ -2316,7 +2362,7 @@ HRESULT EEToProfInterfaceImpl::SetEventMask(DWORD dwEventMask, DWORD dwEventMask
// Fail if concurrent GC is enabled
// This should only happen for attach profilers if user didn't turn on COR_PRF_MONITOR_GC
// at attach time
- if (GCHeap::GetGCHeap()->IsConcurrentGCEnabled())
+ if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled())
{
return CORPROF_E_CONCURRENT_GC_NOT_PROFILABLE;
}
@@ -2384,7 +2430,7 @@ HRESULT EEToProfInterfaceImpl::SetEventMask(DWORD dwEventMask, DWORD dwEventMask
if (fNeedToTurnOffConcurrentGC)
{
// Turn off concurrent GC if it is on so that user can walk the heap safely in GC callbacks
- GCHeap * pGCHeap = GCHeap::GetGCHeap();
+ IGCHeap * pGCHeap = GCHeapUtilities::GetGCHeap();
LOG((LF_CORPROF, LL_INFO10, "**PROF: Turning off concurrent GC at attach.\n"));
@@ -3184,6 +3230,86 @@ HRESULT EEToProfInterfaceImpl::JITCompilationStarted(FunctionID functionId,
}
}
+HRESULT EEToProfInterfaceImpl::DynamicMethodJITCompilationFinished(FunctionID functionId,
+ HRESULT hrStatus,
+ BOOL fIsSafeToBlock)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+
+ // The JIT / MethodDesc code likely holds locks while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: DynamicMethodJITCompilationFinished 0x%p.\n",
+ functionId));
+
+ _ASSERTE(functionId);
+
+ if (m_pCallback8 == NULL)
+ {
+ return S_OK;
+ }
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback8->DynamicMethodJITCompilationFinished(functionId, hrStatus, fIsSafeToBlock);
+ }
+}
+
+HRESULT EEToProfInterfaceImpl::DynamicMethodJITCompilationStarted(FunctionID functionId,
+ BOOL fIsSafeToBlock,
+ LPCBYTE pILHeader,
+ ULONG cbILHeader)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_TRIGGERS;
+ MODE_PREEMPTIVE;
+ CAN_TAKE_LOCK;
+
+ // The JIT / MethodDesc code likely holds locks while this callback is made
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ CLR_TO_PROFILER_ENTRYPOINT((LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: DynamicMethodJITCompilationStarted 0x%p.\n",
+ functionId));
+
+ _ASSERTE(functionId);
+
+ // Currently DynamicMethodJITCompilationStarted is always called with fIsSafeToBlock==TRUE. If this ever changes,
+ // it's safe to remove this assert, but this should serve as a trigger to change our
+ // public documentation to state that this callback is no longer called in preemptive mode all the time.
+ _ASSERTE(fIsSafeToBlock);
+
+ if (m_pCallback8 == NULL)
+ {
+ return S_OK;
+ }
+
+ {
+ // All callbacks are really NOTHROW, but that's enforced partially by the profiler,
+ // whose try/catch blocks aren't visible to the contract system
+ PERMANENT_CONTRACT_VIOLATION(ThrowsViolation, ReasonProfilerCallout);
+ return m_pCallback8->DynamicMethodJITCompilationStarted(functionId, fIsSafeToBlock, pILHeader, cbILHeader);
+ }
+}
+
HRESULT EEToProfInterfaceImpl::JITCachedFunctionSearchStarted(
/* [in] */ FunctionID functionId,
/* [out] */ BOOL *pbUseCachedFunction)
@@ -5609,7 +5735,7 @@ HRESULT EEToProfInterfaceImpl::MovedReferences(GCReferencesData *pData)
LL_INFO10000,
"**PROF: MovedReferences.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
if (pData->curIdx == 0)
{
@@ -5805,7 +5931,7 @@ HRESULT EEToProfInterfaceImpl::ObjectReference(ObjectID objId,
LL_INFO100000,
"**PROF: ObjectReferences.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
{
// All callbacks are really NOTHROW, but that's enforced partially by the profiler,
@@ -5844,7 +5970,7 @@ HRESULT EEToProfInterfaceImpl::FinalizeableObjectQueued(BOOL isCritical, ObjectI
LL_INFO100,
"**PROF: Notifying profiler of finalizeable object.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
{
// All callbacks are really NOTHROW, but that's enforced partially by the profiler,
@@ -5883,7 +6009,7 @@ HRESULT EEToProfInterfaceImpl::RootReferences2(GCReferencesData *pData)
LL_INFO10000,
"**PROF: RootReferences2.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
HRESULT hr = S_OK;
@@ -5948,7 +6074,7 @@ HRESULT EEToProfInterfaceImpl::ConditionalWeakTableElementReferences(GCReference
LL_INFO10000,
"**PROF: ConditionalWeakTableElementReferences.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
HRESULT hr = S_OK;
@@ -6082,7 +6208,7 @@ HRESULT EEToProfInterfaceImpl::GarbageCollectionStarted(int cGenerations, BOOL g
LL_INFO10000,
"**PROF: GarbageCollectionStarted.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
{
// All callbacks are really NOTHROW, but that's enforced partially by the profiler,
@@ -6120,7 +6246,7 @@ HRESULT EEToProfInterfaceImpl::GarbageCollectionFinished()
LL_INFO10000,
"**PROF: GarbageCollectionFinished.\n"));
- _ASSERTE(!GCHeap::GetGCHeap()->IsConcurrentGCEnabled());
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsConcurrentGCEnabled());
{
// All callbacks are really NOTHROW, but that's enforced partially by the profiler,
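
The initialization wiring earlier in this file generalizes one pattern: probe for the newest callback interface first and, on success, fill every older slot by upcasting the same object (each ICorProfilerCallbackN derives from N-1), taking one reference per stored pointer; only when the newest QI fails does the code drop down a level and repeat. Condensed sketch (holder type and IID names as in the hunks above):

    // One successful QueryInterface at level N populates levels N..3.
    ReleaseHolder<ICorProfilerCallback8> pCallback8;
    if (SUCCEEDED(m_pCallback2->QueryInterface(IID_ICorProfilerCallback8,
                                               (LPVOID *)&pCallback8)))
    {
        m_pCallback8 = pCallback8.Extract();
        m_pCallback7 = static_cast<ICorProfilerCallback7 *>(m_pCallback8);
        m_pCallback7->AddRef();
        // ...and so on down to m_pCallback3, one AddRef per stored pointer.
    }
    else
    {
        // No callback8: run the same probe for IID_ICorProfilerCallback7, etc.
    }
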
diff --git a/src/vm/eetoprofinterfaceimpl.h b/src/vm/eetoprofinterfaceimpl.h
index 0390f942bb..76797fcc26 100644
--- a/src/vm/eetoprofinterfaceimpl.h
+++ b/src/vm/eetoprofinterfaceimpl.h
@@ -55,6 +55,7 @@ public:
BOOL IsCallback5Supported();
BOOL IsCallback6Supported();
BOOL IsCallback7Supported();
+ BOOL IsCallback8Supported();
HRESULT SetEventMask(DWORD dwEventMask, DWORD dwEventMaskHigh);
@@ -169,6 +170,17 @@ public:
HRESULT JITCompilationStarted(
FunctionID functionId,
BOOL fIsSafeToBlock);
+
+ HRESULT DynamicMethodJITCompilationStarted(
+ FunctionID functionId,
+ BOOL fIsSafeToBlock,
+ LPCBYTE pILHeader,
+ ULONG cbILHeader);
+
+ HRESULT DynamicMethodJITCompilationFinished(
+ FunctionID functionId,
+ HRESULT hrStatus,
+ BOOL fIsSafeToBlock);
HRESULT JITCachedFunctionSearchStarted(
/* [in] */ FunctionID functionId,
@@ -529,13 +541,14 @@ private:
// Pointer to the profiler's implementation of the callback interface(s).
// Profilers MUST support ICorProfilerCallback2.
- // Profilers MAY optionally support ICorProfilerCallback3,4,5,6,7
+ // Profilers MAY optionally support ICorProfilerCallback3,4,5,6,7,8
ICorProfilerCallback2 * m_pCallback2;
ICorProfilerCallback3 * m_pCallback3;
ICorProfilerCallback4 * m_pCallback4;
ICorProfilerCallback5 * m_pCallback5;
ICorProfilerCallback6 * m_pCallback6;
ICorProfilerCallback7 * m_pCallback7;
+ ICorProfilerCallback8 * m_pCallback8;
HMODULE m_hmodProfilerDLL;
BOOL m_fLoadedViaAttach;
diff --git a/src/vm/eetoprofinterfaceimpl.inl b/src/vm/eetoprofinterfaceimpl.inl
index 8a11118ed1..feb81ed8c0 100644
--- a/src/vm/eetoprofinterfaceimpl.inl
+++ b/src/vm/eetoprofinterfaceimpl.inl
@@ -65,6 +65,12 @@ inline BOOL EEToProfInterfaceImpl::IsCallback7Supported()
return (m_pCallback7 != NULL);
}
+inline BOOL EEToProfInterfaceImpl::IsCallback8Supported()
+{
+ LIMITED_METHOD_CONTRACT;
+ return (m_pCallback8 != NULL);
+}
+
inline FunctionIDMapper * EEToProfInterfaceImpl::GetFunctionIDMapper()
{
LIMITED_METHOD_CONTRACT;
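IsCallback8Supported() mirrors the existing IsCallbackN probes: each m_pCallbackN pointer is non-NULL only if the profiler's callback object answered a QueryInterface for that interface when the profiler was loaded or attached. A hedged sketch of how such a slot might be populated (variable handling is illustrative; only the COM QueryInterface idiom is assumed):

    // Illustrative only; the real probing happens during profiler load/attach.
    ICorProfilerCallback8 * pCallback8 = NULL;
    HRESULT hr = pCallback2->QueryInterface(IID_ICorProfilerCallback8,
                                            reinterpret_cast<void**>(&pCallback8));
    if (FAILED(hr))
    {
        pCallback8 = NULL;   // profiler implements only the older interfaces
    }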
diff --git a/src/vm/eetwain.cpp b/src/vm/eetwain.cpp
index 032bda7c96..2ce7b59578 100644
--- a/src/vm/eetwain.cpp
+++ b/src/vm/eetwain.cpp
@@ -142,13 +142,15 @@ __forceinline int decodeSigned(PTR_CBYTE& src)
/*****************************************************************************
*
- * Decodes the methodInfoPtr and returns the decoded information
- * in the hdrInfo struct. The EIP parameter is the PC location
- * within the active method.
+ * Decodes the X86 GcInfo header and returns the decoded information
+ * in the hdrInfo struct.
+ * curOffset is the code offset within the active method used in the
+ * computation of PrologOffs/EpilogOffs.
+ * Returns the size of the header (number of bytes decoded).
*/
-static size_t crackMethodInfoHdr(PTR_VOID methodInfoPtr,
- unsigned curOffset,
- hdrInfo * infoPtr)
+static size_t DecodeGCHdrInfo(GCInfoToken gcInfoToken,
+ unsigned curOffset,
+ hdrInfo * infoPtr)
{
CONTRACTL {
NOTHROW;
@@ -157,7 +159,7 @@ static size_t crackMethodInfoHdr(PTR_VOID methodInfoPtr,
SUPPORTS_DAC;
} CONTRACTL_END;
- PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+ PTR_CBYTE table = (PTR_CBYTE) gcInfoToken.Info;
#if VERIFY_GC_TABLES
_ASSERTE(*castto(table, unsigned short *)++ == 0xFEEF);
#endif
@@ -170,7 +172,7 @@ static size_t crackMethodInfoHdr(PTR_VOID methodInfoPtr,
/* Decode the InfoHdr */
InfoHdr header;
- table = decodeHeader(table, &header);
+ table = decodeHeader(table, gcInfoToken.Version, &header);
BOOL hasArgTabOffset = FALSE;
if (header.untrackedCnt == HAS_UNTRACKED)
@@ -199,6 +201,10 @@ static size_t crackMethodInfoHdr(PTR_VOID methodInfoPtr,
_ASSERTE(header.syncStartOffset < header.syncEndOffset);
}
+ if (header.revPInvokeOffset == HAS_REV_PINVOKE_FRAME_OFFSET)
+ {
+ header.revPInvokeOffset = fastDecodeUnsigned(table);
+ }
/* Some sanity checks on header */
@@ -220,6 +226,7 @@ static size_t crackMethodInfoHdr(PTR_VOID methodInfoPtr,
infoPtr->argSize = header.argCount * 4;
infoPtr->ebpFrame = header.ebpFrame;
infoPtr->interruptible = header.interruptible;
+ infoPtr->returnKind = (ReturnKind) header.returnKind;
infoPtr->prologSize = header.prologSize;
infoPtr->epilogSize = header.epilogSize;
@@ -232,6 +239,7 @@ static size_t crackMethodInfoHdr(PTR_VOID methodInfoPtr,
infoPtr->syncStartOffset = header.syncStartOffset;
infoPtr->syncEndOffset = header.syncEndOffset;
+ infoPtr->revPInvokeOffset = header.revPInvokeOffset;
infoPtr->doubleAlign = header.doubleAlign;
infoPtr->securityCheck = header.security;
@@ -352,7 +360,7 @@ static size_t crackMethodInfoHdr(PTR_VOID methodInfoPtr,
(infoPtr->gsCookieOffset < infoPtr->stackSize) &&
((header.gsCookieOffset % sizeof(void*)) == 0));
- return table - PTR_CBYTE(methodInfoPtr);
+ return table - PTR_CBYTE(gcInfoToken.Info);
}
/*****************************************************************************/
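The recurring crackMethodInfoHdr to DecodeGCHdrInfo rewrite threads a GCInfoToken through in place of a raw PTR_VOID. The token pairs the encoded GC info blob with the format version it was encoded under, which is what lets the decodeHeader() call above branch on gcInfoToken.Version. Roughly (a sketch; the real definition lives in gcinfotypes.h):

    // Sketch of the token shape implied by the usage above.
    struct GCInfoToken
    {
        PTR_VOID Info;     // pointer to the encoded GC info blob
        UINT32   Version;  // GC info format version used to encode it
    };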
@@ -715,7 +723,7 @@ void EECodeManager::FixContext( ContextType ctxType,
/* Extract the necessary information from the info block header */
- stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(pCodeInfo->GetGCInfo(),
+ stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(pCodeInfo->GetGCInfoToken(),
dwRelOffset,
&stateBuf->hdrInfoBody);
pState->dwIsSet = 1;
@@ -836,11 +844,11 @@ HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx,
hdrInfo oldInfo, newInfo;
- crackMethodInfoHdr(pOldCodeInfo->GetGCInfo(),
+ DecodeGCHdrInfo(pOldCodeInfo->GetGCInfoToken(),
pOldCodeInfo->GetRelOffset(),
&oldInfo);
- crackMethodInfoHdr(pNewCodeInfo->GetGCInfo(),
+ DecodeGCHdrInfo(pNewCodeInfo->GetGCInfoToken(),
pNewCodeInfo->GetRelOffset(),
&newInfo);
@@ -1545,7 +1553,7 @@ bool EECodeManager::IsGcSafe( EECodeInfo *pCodeInfo,
/* Extract the necessary information from the info block header */
- table = (BYTE *)crackMethodInfoHdr(pCodeInfo->GetGCInfo(),
+ table = (BYTE *)DecodeGCHdrInfo(pCodeInfo->GetGCInfoToken(),
dwRelOffset,
&info);
@@ -3905,8 +3913,9 @@ bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext,
PTR_CBYTE methodStart = PTR_CBYTE(pCodeInfo->GetSavedMethodCode());
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
- DWORD curOffs = pCodeInfo->GetRelOffset();
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
+ PTR_VOID methodInfoPtr = gcInfoToken.Info;
+ DWORD curOffs = pCodeInfo->GetRelOffset();
_ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
@@ -3915,7 +3924,7 @@ bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext,
{
/* Extract the necessary information from the info block header */
- stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr,
+ stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken,
curOffs,
&stateBuf->hdrInfoBody);
}
@@ -4097,7 +4106,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
GC_NOTRIGGER;
} CONTRACTL_END;
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
unsigned curOffs = pCodeInfo->GetRelOffset();
unsigned EBP = *pContext->pEbp;
@@ -4108,7 +4117,7 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
unsigned count;
hdrInfo info;
- PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+ PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info);
#if 0
printf("EECodeManager::EnumGcRefs - EIP = %08x ESP = %08x offset = %x GC Info is at %08x\n", *pContext->pPC, ESP, curOffs, table);
#endif
@@ -4116,14 +4125,14 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
/* Extract the necessary information from the info block header */
- table += crackMethodInfoHdr(methodInfoPtr,
- curOffs,
- &info);
+ table += DecodeGCHdrInfo(gcInfoToken,
+ curOffs,
+ &info);
_ASSERTE( curOffs <= info.methodSize);
#ifdef _DEBUG
-// if ((methodInfoPtr == (void*)0x37760d0) && (curOffs == 0x264))
+// if ((gcInfoToken.Info == (void*)0x37760d0) && (curOffs == 0x264))
// __asm int 3;
if (trEnumGCRefs) {
@@ -4220,11 +4229,11 @@ bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
/* Extract the necessary information from the info block header */
- table = PTR_CBYTE(methodInfoPtr);
+ table = PTR_CBYTE(gcInfoToken.Info);
- table += crackMethodInfoHdr(methodInfoPtr,
- curOffs,
- &info);
+ table += DecodeGCHdrInfo(gcInfoToken,
+ curOffs,
+ &info);
}
}
@@ -5030,9 +5039,9 @@ OBJECTREF* EECodeManager::GetAddrOfSecurityObject(CrawlFrame *pCF)
CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
/* Extract the necessary information from the info block header */
- stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(gcInfoToken.Info, // <TODO>truncation</TODO>
- relOffset,
- &stateBuf->hdrInfoBody);
+ stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken, // <TODO>truncation</TODO>
+ relOffset,
+ &stateBuf->hdrInfoBody);
pState->dwIsSet = 1;
if (stateBuf->hdrInfoBody.securityCheck)
@@ -5109,10 +5118,10 @@ OBJECTREF EECodeManager::GetInstance( PREGDISPLAY pContext,
} CONTRACTL_END;
#ifdef _TARGET_X86_
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
- unsigned relOffset = pCodeInfo->GetRelOffset();
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
+ unsigned relOffset = pCodeInfo->GetRelOffset();
- PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+ PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info);
hdrInfo info;
unsigned stackDepth;
TADDR taArgBase;
@@ -5120,9 +5129,9 @@ OBJECTREF EECodeManager::GetInstance( PREGDISPLAY pContext,
/* Extract the necessary information from the info block header */
- table += crackMethodInfoHdr(methodInfoPtr,
- relOffset,
- &info);
+ table += DecodeGCHdrInfo(gcInfoToken,
+ relOffset,
+ &info);
// We do not have accurate information in the prolog or the epilog
if (info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
@@ -5236,14 +5245,15 @@ GenericParamContextType EECodeManager::GetParamContextType(PREGDISPLAY pCont
#ifdef _TARGET_X86_
/* Extract the necessary information from the info block header */
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
- unsigned relOffset = pCodeInfo->GetRelOffset();
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ unsigned relOffset = pCodeInfo->GetRelOffset();
hdrInfo info;
- PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
- table += crackMethodInfoHdr(methodInfoPtr,
- relOffset,
- &info);
+ PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info);
+ table += DecodeGCHdrInfo(gcInfoToken,
+ relOffset,
+ &info);
if (!info.genericsContext ||
info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
@@ -5300,15 +5310,16 @@ PTR_VOID EECodeManager::GetParamTypeArg(PREGDISPLAY pContext,
LIMITED_METHOD_DAC_CONTRACT;
#ifdef _TARGET_X86_
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
- unsigned relOffset = pCodeInfo->GetRelOffset();
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
+ PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ unsigned relOffset = pCodeInfo->GetRelOffset();
/* Extract the necessary information from the info block header */
hdrInfo info;
- PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
- table += crackMethodInfoHdr(methodInfoPtr,
- relOffset,
- &info);
+ PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info);
+ table += DecodeGCHdrInfo(gcInfoToken,
+ relOffset,
+ &info);
if (!info.genericsContext ||
info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
@@ -5424,9 +5435,9 @@ void * EECodeManager::GetGSCookieAddr(PREGDISPLAY pContext,
/* Extract the necessary information from the info block header */
hdrInfo * info = &stateBuf->hdrInfoBody;
- stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(gcInfoToken.Info, // <TODO>truncation</TODO>
- relOffset,
- info);
+ stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken, // <TODO>truncation</TODO>
+ relOffset,
+ info);
pState->dwIsSet = 1;
@@ -5482,9 +5493,9 @@ void * EECodeManager::GetGSCookieAddr(PREGDISPLAY pContext,
*
* Returns true if the given IP is in the given method's prolog or epilog.
*/
-bool EECodeManager::IsInPrologOrEpilog(DWORD relPCoffset,
- PTR_VOID methodInfoPtr,
- size_t* prologSize)
+bool EECodeManager::IsInPrologOrEpilog(DWORD relPCoffset,
+ GCInfoToken gcInfoToken,
+ size_t* prologSize)
{
CONTRACTL {
NOTHROW;
@@ -5494,7 +5505,7 @@ bool EECodeManager::IsInPrologOrEpilog(DWORD relPCoffset,
#ifndef USE_GC_INFO_DECODER
hdrInfo info;
- crackMethodInfoHdr(methodInfoPtr, relPCoffset, &info);
+ DecodeGCHdrInfo(gcInfoToken, relPCoffset, &info);
if (prologSize)
*prologSize = info.prologSize;
@@ -5511,10 +5522,9 @@ bool EECodeManager::IsInPrologOrEpilog(DWORD relPCoffset,
*
* Returns true if the given IP is in the synchronized region of the method (valid for synchronized functions only)
*/
-bool EECodeManager::IsInSynchronizedRegion(
- DWORD relOffset,
- PTR_VOID methodInfoPtr,
- unsigned flags)
+bool EECodeManager::IsInSynchronizedRegion(DWORD relOffset,
+ GCInfoToken gcInfoToken,
+ unsigned flags)
{
CONTRACTL {
NOTHROW;
@@ -5524,7 +5534,7 @@ bool EECodeManager::IsInSynchronizedRegion(
#ifndef USE_GC_INFO_DECODER
hdrInfo info;
- crackMethodInfoHdr(methodInfoPtr, relOffset, &info);
+ DecodeGCHdrInfo(gcInfoToken, relOffset, &info);
// We should be called only for synchronized methods
_ASSERTE(info.syncStartOffset != INVALID_SYNC_OFFSET && info.syncEndOffset != INVALID_SYNC_OFFSET);
@@ -5558,9 +5568,8 @@ size_t EECodeManager::GetFunctionSize(GCInfoToken gcInfoToken)
#if defined(_TARGET_X86_)
hdrInfo info;
- PTR_VOID methodInfoPtr = gcInfoToken.Info;
- crackMethodInfoHdr(methodInfoPtr, 0, &info);
+ DecodeGCHdrInfo(gcInfoToken, 0, &info);
return info.methodSize;
#elif defined(USE_GC_INFO_DECODER)
@@ -5578,15 +5587,47 @@ size_t EECodeManager::GetFunctionSize(GCInfoToken gcInfoToken)
PORTABILITY_ASSERT("EECodeManager::GetFunctionSize is not implemented on this platform.");
return 0;
#endif
+}
+
+/*****************************************************************************
+*
+* Returns the ReturnKind of a given function.
+*/
+ReturnKind EECodeManager::GetReturnKind(GCInfoToken gcInfoToken)
+{
+ CONTRACTL{
+ NOTHROW;
+ GC_NOTRIGGER;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+ if (!gcInfoToken.IsReturnKindAvailable())
+ {
+ return RT_Illegal;
+ }
+
+#if defined(_TARGET_X86_)
+ hdrInfo info;
+
+ DecodeGCHdrInfo(gcInfoToken, 0, &info);
+
+ return info.returnKind;
+#elif defined(USE_GC_INFO_DECODER)
+
+ GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_RETURN_KIND);
+ return gcInfoDecoder.GetReturnKind();
+
+#else // !_TARGET_X86_ && !USE_GC_INFO_DECODER
+ PORTABILITY_ASSERT("EECodeManager::GetReturnKind is not implemented on this platform.");
+ return RT_Illegal;
+#endif
}
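GetReturnKind() exposes whether a method's return value carries a GC reference, which callers such as return-address hijacking logic need before suspending a thread at a return site. A hedged sketch of a consumer (the RT_* values are assumed from the ReturnKind enum in gcinfotypes.h; the function itself is illustrative):

    // Illustrative consumer: does the method's return value hold a GC reference
    // that must be protected across a suspension?
    bool ReturnValueIsGCRef(ReturnKind kind)
    {
        switch (kind)
        {
        case RT_Object:   // returns an object reference
        case RT_ByRef:    // returns a managed byref
            return true;
        case RT_Scalar:   // plain integer/float return value
        default:          // RT_Illegal: encoding too old to say
            return false;
        }
    }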
/*****************************************************************************
*
* Returns the size of the frame of the given function.
*/
-unsigned int EECodeManager::GetFrameSize(PTR_VOID methodInfoPtr)
+unsigned int EECodeManager::GetFrameSize(GCInfoToken gcInfoToken)
{
CONTRACTL {
NOTHROW;
@@ -5596,7 +5637,7 @@ unsigned int EECodeManager::GetFrameSize(PTR_VOID methodInfoPtr)
#ifndef USE_GC_INFO_DECODER
hdrInfo info;
- crackMethodInfoHdr(methodInfoPtr, 0, &info);
+ DecodeGCHdrInfo(gcInfoToken, 0, &info);
// currently only used by E&C callers need to know about doubleAlign
// in all likelihood
@@ -5624,10 +5665,10 @@ const BYTE* EECodeManager::GetFinallyReturnAddr(PREGDISPLAY pReg)
#endif
}
-BOOL EECodeManager::IsInFilter(void *methodInfoPtr,
- unsigned offset,
- PCONTEXT pCtx,
- DWORD curNestLevel)
+BOOL EECodeManager::IsInFilter(GCInfoToken gcInfoToken,
+ unsigned offset,
+ PCONTEXT pCtx,
+ DWORD curNestLevel)
{
CONTRACTL {
NOTHROW;
@@ -5640,9 +5681,9 @@ BOOL EECodeManager::IsInFilter(void *methodInfoPtr,
hdrInfo info;
- crackMethodInfoHdr(methodInfoPtr,
- offset,
- &info);
+ DecodeGCHdrInfo(gcInfoToken,
+ offset,
+ &info);
/* make sure that we have an ebp stack frame */
@@ -5668,7 +5709,7 @@ BOOL EECodeManager::IsInFilter(void *methodInfoPtr,
}
-BOOL EECodeManager::LeaveFinally(void *methodInfoPtr,
+BOOL EECodeManager::LeaveFinally(GCInfoToken gcInfoToken,
unsigned offset,
PCONTEXT pCtx)
{
@@ -5681,9 +5722,9 @@ BOOL EECodeManager::LeaveFinally(void *methodInfoPtr,
hdrInfo info;
- crackMethodInfoHdr(methodInfoPtr,
- offset,
- &info);
+ DecodeGCHdrInfo(gcInfoToken,
+ offset,
+ &info);
DWORD nestingLevel;
GetHandlerFrameInfo(&info, pCtx->Ebp, pCtx->Esp, (DWORD) IGNORE_VAL, NULL, &nestingLevel);
@@ -5707,7 +5748,7 @@ BOOL EECodeManager::LeaveFinally(void *methodInfoPtr,
#endif
}
-void EECodeManager::LeaveCatch(void *methodInfoPtr,
+void EECodeManager::LeaveCatch(GCInfoToken gcInfoToken,
unsigned offset,
PCONTEXT pCtx)
{
@@ -5724,7 +5765,7 @@ void EECodeManager::LeaveCatch(void *methodInfoPtr,
bool hasInnerFilter;
hdrInfo info;
- crackMethodInfoHdr(methodInfoPtr, offset, &info);
+ DecodeGCHdrInfo(gcInfoToken, offset, &info);
GetHandlerFrameInfo(&info, pCtx->Ebp, pCtx->Esp, (DWORD) IGNORE_VAL,
&baseSP, &nestingLevel, &hasInnerFilter);
// _ASSERTE(frameType == FR_HANDLER);
@@ -5774,17 +5815,17 @@ TADDR EECodeManager::GetAmbientSP(PREGDISPLAY pContext,
} CONTRACTL_END;
#ifdef _TARGET_X86_
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
_ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
- PTR_CBYTE table = PTR_CBYTE(methodInfoPtr);
+ PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info);
/* Extract the necessary information from the info block header */
- stateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr,
- dwRelOffset,
- &stateBuf->hdrInfoBody);
+ stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken,
+ dwRelOffset,
+ &stateBuf->hdrInfoBody);
table += stateBuf->hdrInfoSize;
pState->dwIsSet = 1;
@@ -5868,8 +5909,8 @@ ULONG32 EECodeManager::GetStackParameterSize(EECodeInfo * pCodeInfo)
} CONTRACTL_END;
#if defined(_TARGET_X86_)
- PTR_VOID methodInfoPtr = pCodeInfo->GetGCInfo();
- unsigned dwOffset = pCodeInfo->GetRelOffset();
+ GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
+ unsigned dwOffset = pCodeInfo->GetRelOffset();
CodeManState state;
state.dwIsSet = 0;
@@ -5878,7 +5919,7 @@ ULONG32 EECodeManager::GetStackParameterSize(EECodeInfo * pCodeInfo)
CodeManStateBuf * pStateBuf = reinterpret_cast<CodeManStateBuf *>(state.stateBuf);
hdrInfo * pHdrInfo = &(pStateBuf->hdrInfoBody);
- pStateBuf->hdrInfoSize = (DWORD)crackMethodInfoHdr(methodInfoPtr, dwOffset, pHdrInfo);
+ pStateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken, dwOffset, pHdrInfo);
// We need to subtract 4 here because ESPIncrOnReturn() includes the stack slot containing the return
// address.
diff --git a/src/vm/encee.cpp b/src/vm/encee.cpp
index ca4e7fe553..7f12643340 100644
--- a/src/vm/encee.cpp
+++ b/src/vm/encee.cpp
@@ -149,7 +149,11 @@ HRESULT EditAndContinueModule::ApplyEditAndContinue(
HRESULT hr = S_OK;
HENUMInternal enumENC;
-
+
+ BYTE *pLocalILMemory = NULL;
+ IMDInternalImport *pMDImport = NULL;
+ IMDInternalImport *pNewMDImport = NULL;
+
CONTRACT_VIOLATION(GCViolation); // SafeComHolder goes to preemptive mode, which will trigger a GC
SafeComHolder<IMDInternalImportENC> pIMDInternalImportENC;
SafeComHolder<IMetaDataEmit> pEmitter;
@@ -175,8 +179,7 @@ HRESULT EditAndContinueModule::ApplyEditAndContinue(
IfFailGo(hr);
// Grab the current importer.
- IMDInternalImport *pMDImport = GetMDImport();
- IMDInternalImport *pNewMDImport;
+ pMDImport = GetMDImport();
// Apply the EnC delta to this module's metadata.
IfFailGo(pMDImport->ApplyEditAndContinue(pDeltaMD, cbDeltaMD, &pNewMDImport));
@@ -195,7 +198,7 @@ HRESULT EditAndContinueModule::ApplyEditAndContinue(
IfFailGo(GetMetaDataPublicInterfaceFromInternal(pMDImport, IID_IMetaDataEmit, (void **)&pEmitter));
// Copy the deltaIL into our RVAable IL memory
- BYTE *pLocalILMemory = new BYTE[cbDeltaIL];
+ pLocalILMemory = new BYTE[cbDeltaIL];
memcpy(pLocalILMemory, pDeltaIL, cbDeltaIL);
// Enumerate all of the EnC delta tokens
@@ -203,7 +206,6 @@ HRESULT EditAndContinueModule::ApplyEditAndContinue(
IfFailGo(pIMDInternalImportENC->EnumDeltaTokensInit(&enumENC));
mdToken token;
- FieldDesc * pField = NULL;
while (pIMDInternalImportENC->EnumNext(&enumENC, &token))
{
STRESS_LOG3(LF_ENC, LL_INFO100, "EACM::AEAC: updated token 0x%x; type 0x%x; rid 0x%x\n", token, TypeFromToken(token), RidFromToken(token));
@@ -248,8 +250,7 @@ HRESULT EditAndContinueModule::ApplyEditAndContinue(
// FieldDef token - add a new field
LOG((LF_ENC, LL_INFO10000, "EACM::AEAC: Found field 0x%x\n", token));
- pField = LookupFieldDef(token);
- if (pField)
+ if (LookupFieldDef(token))
{
// Field already exists - just ignore for now
continue;
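Hoisting pLocalILMemory, pMDImport, and pNewMDImport above the first IfFailGo in this hunk is not cosmetic: IfFailGo conventionally expands to a goto ErrExit, and C++ rejects a jump that skips over a declaration with an initializer. A standalone illustration under that assumption (Demo and Step1 are hypothetical names):

    HRESULT Step1();  // illustrative

    HRESULT Demo()
    {
        HRESULT hr = S_OK;
        BYTE*   pLocal = NULL;       // declared up front; placing
                                     // 'BYTE* pLocal = new BYTE[16];' after the
        if (FAILED(hr = Step1()))    // goto below would not compile
            goto ErrExit;
        pLocal = new BYTE[16];
    ErrExit:
        delete[] pLocal;             // always initialized, so cleanup is safe
        return hr;
    }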
diff --git a/src/vm/eventtrace.cpp b/src/vm/eventtrace.cpp
index d8702a53e1..cec79214a4 100644
--- a/src/vm/eventtrace.cpp
+++ b/src/vm/eventtrace.cpp
@@ -431,19 +431,19 @@ ETW::SamplingLog::EtwStackWalkStatus ETW::SamplingLog::SaveCurrentStack(int skip
VOID ETW::GCLog::GCSettingsEvent()
{
- if (GCHeap::IsGCHeapInitialized())
+ if (GCHeapUtilities::IsGCHeapInitialized())
{
if (ETW_TRACING_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PRIVATE_PROVIDER_Context,
GCSettings))
{
ETW::GCLog::ETW_GC_INFO Info;
- Info.GCSettings.ServerGC = GCHeap::IsServerHeap ();
- Info.GCSettings.SegmentSize = GCHeap::GetGCHeap()->GetValidSegmentSize (FALSE);
- Info.GCSettings.LargeObjectSegmentSize = GCHeap::GetGCHeap()->GetValidSegmentSize (TRUE);
+ Info.GCSettings.ServerGC = GCHeapUtilities::IsServerHeap ();
+ Info.GCSettings.SegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (FALSE);
+ Info.GCSettings.LargeObjectSegmentSize = GCHeapUtilities::GetGCHeap()->GetValidSegmentSize (TRUE);
FireEtwGCSettings_V1(Info.GCSettings.SegmentSize, Info.GCSettings.LargeObjectSegmentSize, Info.GCSettings.ServerGC, GetClrInstanceId());
}
- GCHeap::GetGCHeap()->TraceGCSegments();
+ GCHeapUtilities::GetGCHeap()->DiagTraceGCSegments();
}
};
@@ -892,7 +892,7 @@ VOID ETW::GCLog::FireGcStartAndGenerationRanges(ETW_GC_INFO * pGcInfo)
// GCStart, then retrieve it
LONGLONG l64ClientSequenceNumberToLog = 0;
if ((s_l64LastClientSequenceNumber != 0) &&
- (pGcInfo->GCStart.Depth == GCHeap::GetMaxGeneration()) &&
+ (pGcInfo->GCStart.Depth == GCHeapUtilities::GetGCHeap()->GetMaxGeneration()) &&
(pGcInfo->GCStart.Reason == ETW_GC_INFO::GC_INDUCED))
{
l64ClientSequenceNumberToLog = InterlockedExchange64(&s_l64LastClientSequenceNumber, 0);
@@ -901,8 +901,8 @@ VOID ETW::GCLog::FireGcStartAndGenerationRanges(ETW_GC_INFO * pGcInfo)
FireEtwGCStart_V2(pGcInfo->GCStart.Count, pGcInfo->GCStart.Depth, pGcInfo->GCStart.Reason, pGcInfo->GCStart.Type, GetClrInstanceId(), l64ClientSequenceNumberToLog);
// Fire an event per range per generation
- GCHeap *hp = GCHeap::GetGCHeap();
- hp->DescrGenerationsToProfiler(FireSingleGenerationRangeEvent, NULL /* context */);
+ IGCHeap *hp = GCHeapUtilities::GetGCHeap();
+ hp->DiagDescrGenerations(FireSingleGenerationRangeEvent, NULL /* context */);
}
}
@@ -928,8 +928,8 @@ VOID ETW::GCLog::FireGcEndAndGenerationRanges(ULONG Count, ULONG Depth)
CLR_GC_KEYWORD))
{
// Fire an event per range per generation
- GCHeap *hp = GCHeap::GetGCHeap();
- hp->DescrGenerationsToProfiler(FireSingleGenerationRangeEvent, NULL /* context */);
+ IGCHeap *hp = GCHeapUtilities::GetGCHeap();
+ hp->DiagDescrGenerations(FireSingleGenerationRangeEvent, NULL /* context */);
// GCEnd
FireEtwGCEnd_V1(Count, Depth, GetClrInstanceId());
@@ -938,7 +938,7 @@ VOID ETW::GCLog::FireGcEndAndGenerationRanges(ULONG Count, ULONG Depth)
//---------------------------------------------------------------------------------------
//
-// Callback made by GC when we call GCHeap::DescrGenerationsToProfiler(). This is
+// Callback made by GC when we call GCHeapUtilities::GetGCHeap()->DiagDescrGenerations(). This is
// called once per range per generation, and results in a single ETW event per range per
// generation.
//
@@ -1033,7 +1033,7 @@ HRESULT ETW::GCLog::ForceGCForDiagnostics()
ForcedGCHolder forcedGCHolder;
- hr = GCHeap::GetGCHeap()->GarbageCollect(
+ hr = GCHeapUtilities::GetGCHeap()->GarbageCollect(
-1, // all generations should be collected
FALSE, // low_memory_p
collection_blocking);
@@ -4468,6 +4468,12 @@ extern "C"
BOOLEAN bIsRundownTraceHandle = (context->RegistrationHandle==Microsoft_Windows_DotNETRuntimeRundownHandle);
+ // TypeSystemLog needs a notification when certain keywords are modified, so
+ // give it a hook here.
+ if (g_fEEStarted && !g_fEEShutDown && bIsPublicTraceHandle)
+ {
+ ETW::TypeSystemLog::OnKeywordsChanged();
+ }
// A manifest based provider can be enabled to multiple event tracing sessions
// As long as there is at least 1 enabled session, IsEnabled will be TRUE
@@ -4478,13 +4484,6 @@ extern "C"
(ControlCode == EVENT_CONTROL_CODE_CAPTURE_STATE));
if(bEnabled)
{
- // TypeSystemLog needs a notification when certain keywords are modified, so
- // give it a hook here.
- if (g_fEEStarted && !g_fEEShutDown && bIsPublicTraceHandle)
- {
- ETW::TypeSystemLog::OnKeywordsChanged();
- }
-
if (bIsPrivateTraceHandle)
{
ETW::GCLog::GCSettingsEvent();
diff --git a/src/vm/eventtracepriv.h b/src/vm/eventtracepriv.h
index 0932225133..7aca5091fa 100644
--- a/src/vm/eventtracepriv.h
+++ b/src/vm/eventtracepriv.h
@@ -23,7 +23,10 @@
#define _countof(_array) (sizeof(_array)/sizeof(_array[0]))
#endif
-const UINT cbMaxEtwEvent = 64 * 1024;
+// ETW has a limitation of 64KB for the TOTAL event size, and there is overhead associated with
+// the event headers. It is unclear exactly how much that overhead is, but reserving 1KB keeps us
+// sufficiently far from the limit without sacrificing the perf of bulk processing.
+const UINT cbMaxEtwEvent = 63 * 1024;
//---------------------------------------------------------------------------------------
// C++ copies of ETW structures
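The new constant reserves exactly 64KB - 63KB = 1KB of headroom for event header overhead. A compile-time guard could make that relationship explicit (a sketch only; nothing like this exists in the source):

    // Sketch: document the 1KB headroom at compile time.
    const unsigned cbEtwHardLimit      = 64 * 1024;  // documented ETW per-event maximum
    const unsigned cbMaxEtwEventSketch = 63 * 1024;  // the cap chosen above
    static_assert(cbEtwHardLimit - cbMaxEtwEventSketch == 1024,
                  "1KB reserved for ETW event header overhead");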
diff --git a/src/vm/excep.cpp b/src/vm/excep.cpp
index 672f315fcd..5a6f7c673f 100644
--- a/src/vm/excep.cpp
+++ b/src/vm/excep.cpp
@@ -20,7 +20,7 @@
#include "cgensys.h"
#include "comutilnative.h"
#include "siginfo.hpp"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in RealCOMPlusThrow
#include "perfcounters.h"
#include "dllimportcallback.h"
@@ -1679,7 +1679,7 @@ bool FinallyIsUnwinding(EHRangeTreeNode *pNode,
BOOL LeaveCatch(ICodeManager* pEECM,
Thread *pThread,
CONTEXT *pCtx,
- void *methodInfoPtr,
+ GCInfoToken gcInfoToken,
unsigned offset)
{
CONTRACTL
@@ -1690,6 +1690,7 @@ BOOL LeaveCatch(ICodeManager* pEECM,
}
CONTRACTL_END;
+#ifndef FEATURE_PAL
// We can assert these things here, and skip a call
// to COMPlusCheckForAbort later.
@@ -1703,10 +1704,14 @@ BOOL LeaveCatch(ICodeManager* pEECM,
PopNestedExceptionRecords(esp, pCtx, pThread->GetExceptionListPtr());
// Do JIT-specific work
- pEECM->LeaveCatch(methodInfoPtr, offset, pCtx);
+ pEECM->LeaveCatch(gcInfoToken, offset, pCtx);
SetSP(pCtx, (UINT_PTR)esp);
return TRUE;
+#else // FEATURE_PAL
+ PORTABILITY_ASSERT("LeaveCatch");
+ return FALSE;
+#endif
}
#endif // WIN64EXCEPTIONS
@@ -1762,7 +1767,7 @@ HRESULT IsLegalTransition(Thread *pThread,
ICodeManager* pEECM,
PREGDISPLAY pReg,
SLOT addrStart,
- void *methodInfoPtr,
+ GCInfoToken gcInfoToken,
PCONTEXT pCtx)
{
CONTRACTL
@@ -1875,7 +1880,7 @@ HRESULT IsLegalTransition(Thread *pThread,
if (!LeaveCatch(pEECM,
pThread,
pFilterCtx,
- methodInfoPtr,
+ gcInfoToken,
offFrom))
return E_FAIL;
}
@@ -1930,7 +1935,7 @@ HRESULT IsLegalTransition(Thread *pThread,
if (!fCanSetIPOnly)
{
- if (!pEECM->LeaveFinally(methodInfoPtr,
+ if (!pEECM->LeaveFinally(gcInfoToken,
offFrom,
pFilterCtx))
return E_FAIL;
@@ -2041,7 +2046,7 @@ HRESULT SetIPFromSrcToDst(Thread *pThread,
EECodeInfo codeInfo((TADDR)(addrStart));
ICodeManager * pEECM = codeInfo.GetCodeManager();
- LPVOID methodInfoPtr = codeInfo.GetGCInfo();
+ GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
// Do both checks here so compiler doesn't complain about skipping
// initialization b/c of goto.
@@ -2097,7 +2102,7 @@ retryForCommit:
pEECM,
pReg,
addrStart,
- methodInfoPtr,
+ gcInfoToken,
pCtx);
if (FAILED(hr))
@@ -2120,7 +2125,7 @@ retryForCommit:
pEECM,
pReg,
addrStart,
- methodInfoPtr,
+ gcInfoToken,
pCtx);
if (FAILED(hr))
@@ -2143,7 +2148,7 @@ retryForCommit:
pEECM,
pReg,
addrStart,
- methodInfoPtr,
+ gcInfoToken,
pCtx);
if (FAILED(hr))
@@ -7313,8 +7318,8 @@ AdjustContextForWriteBarrier(
void* f_IP = (void *)GetIP(pContext);
- if (f_IP >= (void *) JIT_WriteBarrierStart && f_IP <= (void *) JIT_WriteBarrierLast ||
- f_IP >= (void *) JIT_PatchedWriteBarrierStart && f_IP <= (void *) JIT_PatchedWriteBarrierLast)
+ if (((f_IP >= (void *) JIT_WriteBarrierStart) && (f_IP <= (void *) JIT_WriteBarrierLast)) ||
+ ((f_IP >= (void *) JIT_PatchedWriteBarrierStart) && (f_IP <= (void *) JIT_PatchedWriteBarrierLast)))
{
// set the exception IP to be the instruction that called the write barrier
void* callsite = (void *)GetAdjustedCallAddress(*dac_cast<PTR_PCODE>(GetSP(pContext)));
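The added parentheses in this hunk do not change evaluation: '&&' already binds tighter than '||', so the old and new conditions are identical, and the explicit grouping just documents the intent and silences parenthesization warnings. A compile-time check of the equivalence (illustrative):

    // Illustrative: '&&' binds tighter than '||'.
    static_assert((true && false || true && true)
               == ((true && false) || (true && true)),
                  "a && b || c && d parses as (a && b) || (c && d)");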
@@ -9916,47 +9921,48 @@ PTR_EHWatsonBucketTracker GetWatsonBucketTrackerForPreallocatedException(OBJECTR
goto doValidation;
}
- // Find the reference to the exception tracker corresponding to the preallocated exception,
- // starting the search from the current exception tracker (2nd arg of NULL specifies that).
-#if defined(WIN64EXCEPTIONS)
- PTR_ExceptionTracker pEHTracker = NULL;
- PTR_ExceptionTracker pPreviousEHTracker = NULL;
+ {
+ // Find the reference to the exception tracker corresponding to the preallocated exception,
+ // starting the search from the current exception tracker (2nd arg of NULL specifies that).
+ #if defined(WIN64EXCEPTIONS)
+ PTR_ExceptionTracker pEHTracker = NULL;
+ PTR_ExceptionTracker pPreviousEHTracker = NULL;
#elif _TARGET_X86_
- PTR_ExInfo pEHTracker = NULL;
- PTR_ExInfo pPreviousEHTracker = NULL;
+ PTR_ExInfo pEHTracker = NULL;
+ PTR_ExInfo pPreviousEHTracker = NULL;
#else // !(_WIN64 || _TARGET_X86_)
#error Unsupported platform
#endif // _WIN64
- if (fStartSearchFromPreviousTracker)
- {
- // Get the exception tracker previous to the current one
- pPreviousEHTracker = GetThread()->GetExceptionState()->GetCurrentExceptionTracker()->GetPreviousExceptionTracker();
+ if (fStartSearchFromPreviousTracker)
+ {
+ // Get the exception tracker previous to the current one
+ pPreviousEHTracker = GetThread()->GetExceptionState()->GetCurrentExceptionTracker()->GetPreviousExceptionTracker();
+
+ // If there is no previous tracker to start from, then simply abort the search attempt.
+ // If we couldn't find the exception tracker, then buckets are not available
+ if (pPreviousEHTracker == NULL)
+ {
+ LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Couldn't find the previous EHTracker to start the search from.\n"));
+ pWBTracker = NULL;
+ goto done;
+ }
+ }
+
+ pEHTracker = GetEHTrackerForPreallocatedException(gc.oPreAllocThrowable, pPreviousEHTracker);
- // If there is no previous tracker to start from, then simply abort the search attempt.
// If we couldn't find the exception tracker, then buckets are not available
- if (pPreviousEHTracker == NULL)
+ if (pEHTracker == NULL)
{
- LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Couldnt find the previous EHTracker to start the search from.\n"));
+ LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Couldn't find EHTracker for preallocated exception object.\n"));
pWBTracker = NULL;
goto done;
}
- }
- pEHTracker = GetEHTrackerForPreallocatedException(gc.oPreAllocThrowable, pPreviousEHTracker);
-
- // If we couldnt find the exception tracker, then buckets are not available
- if (pEHTracker == NULL)
- {
- LOG((LF_EH, LL_INFO100, "GetWatsonBucketTrackerForPreallocatedException - Couldnt find EHTracker for preallocated exception object.\n"));
- pWBTracker = NULL;
- goto done;
+ // Get the Watson Bucket Tracker from the exception tracker
+ pWBTracker = pEHTracker->GetWatsonBucketTracker();
}
-
- // Get the Watson Bucket Tracker from the exception tracker
- pWBTracker = pEHTracker->GetWatsonBucketTracker();
-
doValidation:
_ASSERTE(pWBTracker != NULL);
@@ -12196,7 +12202,7 @@ done:
// CE can be caught in the VM and later reraised again. Examples of such scenarios
// include AD transition, COM interop, Reflection invocation, to name a few.
// In such cases, we want to mark the corruption severity for reuse upon reraise,
-// implying that when the VM does a reraise of such a exception, we should use
+// implying that when the VM does a reraise of such an exception, we should use
// the original corruption severity for the new raised exception, instead of creating
// a new one for it.
/* static */
@@ -12913,15 +12919,6 @@ void ExceptionNotifications::DeliverNotificationInternal(ExceptionNotificationHa
AppDomain *pCurDomain = GetAppDomain();
_ASSERTE(pCurDomain != NULL);
-#ifdef FEATURE_CORECLR
- if (true)
- {
- // On CoreCLR, we dont support enhanced exception notifications
- _ASSERTE(!"CoreCLR does not support enhanced exception notifications!");
- return;
- }
-#endif // FEATURE_CORECLR
-
struct
{
OBJECTREF oNotificationDelegate;
diff --git a/src/vm/excep.h b/src/vm/excep.h
index 527e3a1ed7..7ef1921593 100644
--- a/src/vm/excep.h
+++ b/src/vm/excep.h
@@ -88,9 +88,11 @@ struct ThrowCallbackType
MethodDesc * pProfilerNotify; // Context for profiler callbacks -- see COMPlusFrameHandler().
BOOL bReplaceStack; // Used to pass info to SaveStackTrace call
BOOL bSkipLastElement;// Used to pass info to SaveStackTrace call
+#ifndef FEATURE_PAL
HANDLE hCallerToken;
HANDLE hImpersonationToken;
BOOL bImpersonationTokenSet;
+#endif // !FEATURE_PAL
#ifdef _DEBUG
void * pCurrentExceptionRecord;
void * pPrevExceptionRecord;
@@ -114,9 +116,11 @@ struct ThrowCallbackType
pProfilerNotify = NULL;
bReplaceStack = FALSE;
bSkipLastElement = FALSE;
+#ifndef FEATURE_PAL
hCallerToken = NULL;
hImpersonationToken = NULL;
bImpersonationTokenSet = FALSE;
+#endif // !FEATURE_PAL
#ifdef _DEBUG
pCurrentExceptionRecord = 0;
@@ -422,10 +426,11 @@ VOID DECLSPEC_NORETURN RealCOMPlusThrowInvalidCastException(TypeHandle thCastFro
VOID DECLSPEC_NORETURN RealCOMPlusThrowInvalidCastException(OBJECTREF *pObj, TypeHandle thCastTo);
+#ifdef _TARGET_X86_
+
#include "eexcp.h"
#include "exinfo.h"
-#ifdef _TARGET_X86_
struct FrameHandlerExRecord
{
EXCEPTION_REGISTRATION_RECORD m_ExReg;
diff --git a/src/vm/exceptionhandling.cpp b/src/vm/exceptionhandling.cpp
index e59f10e070..ed155eb998 100644
--- a/src/vm/exceptionhandling.cpp
+++ b/src/vm/exceptionhandling.cpp
@@ -4730,7 +4730,7 @@ VOID DECLSPEC_NORETURN DispatchManagedException(PAL_SEHException& ex, bool isHar
}
}
- throw ex;
+ throw std::move(ex);
}
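throw ex; copy-initializes the thrown exception object from ex, while throw std::move(ex); lets it be move-initialized, avoiding a copy of the (potentially large) exception payload. A standalone illustration of the difference (OwningException is a hypothetical type, not PAL_SEHException):

    #include <cstring>
    #include <utility>

    struct OwningException
    {
        char*  buffer;
        size_t size;
        explicit OwningException(size_t n) : buffer(new char[n]), size(n) {}
        OwningException(const OwningException& o)        // deep copy (expensive)
            : buffer(new char[o.size]), size(o.size)
        { std::memcpy(buffer, o.buffer, size); }
        OwningException(OwningException&& o) noexcept    // cheap: steal the buffer
            : buffer(o.buffer), size(o.size) { o.buffer = nullptr; o.size = 0; }
        ~OwningException() { delete[] buffer; }
    };

    void Rethrow(OwningException& ex)
    {
        // 'throw ex;' would invoke the deep copy; std::move selects the move ctor.
        throw std::move(ex);
    }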
#ifdef _AMD64_
diff --git a/src/vm/exceptmacros.h b/src/vm/exceptmacros.h
index efed993a2d..2af064c96d 100644
--- a/src/vm/exceptmacros.h
+++ b/src/vm/exceptmacros.h
@@ -334,7 +334,7 @@ VOID DECLSPEC_NORETURN DispatchManagedException(PAL_SEHException& ex, bool isHar
UNREACHABLE(); \
}
-#else
+#else // FEATURE_PAL
#define INSTALL_MANAGED_EXCEPTION_DISPATCHER
#define UNINSTALL_MANAGED_EXCEPTION_DISPATCHER
diff --git a/src/vm/exinfo.cpp b/src/vm/exinfo.cpp
index 4cbc6d34c4..9e07cebaf3 100644
--- a/src/vm/exinfo.cpp
+++ b/src/vm/exinfo.cpp
@@ -76,9 +76,11 @@ void ExInfo::CopyAndClearSource(ExInfo *from)
// Finally, initialize the source ExInfo.
from->Init();
+#ifndef FEATURE_PAL
// Clear the Watson Bucketing information as well since they
// have been transferred over by the "memcpy" above.
from->GetWatsonBucketTracker()->Init();
+#endif // !FEATURE_PAL
}
void ExInfo::Init()
@@ -136,8 +138,10 @@ ExInfo::ExInfo()
m_hThrowable = NULL;
Init();
+#ifndef FEATURE_PAL
// Init the WatsonBucketTracker
m_WatsonBucketTracker.Init();
+#endif // !FEATURE_PAL
}
//*******************************************************************************
@@ -206,9 +210,11 @@ void ExInfo::UnwindExInfo(VOID* limit)
pPrevNestedInfo->DestroyExceptionHandle();
}
+ #ifndef FEATURE_PAL
// Free the Watson bucket details when ExInfo
// is being released
pPrevNestedInfo->GetWatsonBucketTracker()->ClearWatsonBucketDetails();
+ #endif // !FEATURE_PAL
pPrevNestedInfo->m_StackTraceInfo.FreeStackTrace();
@@ -256,8 +262,10 @@ void ExInfo::UnwindExInfo(VOID* limit)
// We just do a basic Init of the current top ExInfo here.
Init();
+ #ifndef FEATURE_PAL
// Init the Watson buckets as well
GetWatsonBucketTracker()->ClearWatsonBucketDetails();
+ #endif // !FEATURE_PAL
}
}
#endif // DACCESS_COMPILE
diff --git a/src/vm/exinfo.h b/src/vm/exinfo.h
index 72f2775106..2a8030fb56 100644
--- a/src/vm/exinfo.h
+++ b/src/vm/exinfo.h
@@ -79,6 +79,7 @@ public:
//
void* m_StackAddress; // A pseudo or real stack location for this record.
+#ifndef FEATURE_PAL
private:
EHWatsonBucketTracker m_WatsonBucketTracker;
public:
@@ -87,6 +88,7 @@ public:
LIMITED_METHOD_CONTRACT;
return PTR_EHWatsonBucketTracker(PTR_HOST_MEMBER_TADDR(ExInfo, this, m_WatsonBucketTracker));
}
+#endif // !FEATURE_PAL
#ifdef FEATURE_CORRUPTING_EXCEPTIONS
private:
diff --git a/src/vm/finalizerthread.cpp b/src/vm/finalizerthread.cpp
index 5d51d33cfb..2f72b07957 100644
--- a/src/vm/finalizerthread.cpp
+++ b/src/vm/finalizerthread.cpp
@@ -295,7 +295,7 @@ Object * FinalizerThread::FinalizeAllObjects(Object* fobj, int bitToCheck)
{
return NULL;
}
- fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
}
Thread *pThread = GetThread();
@@ -320,7 +320,7 @@ Object * FinalizerThread::FinalizeAllObjects(Object* fobj, int bitToCheck)
{
return NULL;
}
- fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
}
else
{
@@ -337,7 +337,7 @@ Object * FinalizerThread::FinalizeAllObjects(Object* fobj, int bitToCheck)
{
return NULL;
}
- fobj = GCHeap::GetGCHeap()->GetNextFinalizable();
+ fobj = GCHeapUtilities::GetGCHeap()->GetNextFinalizable();
}
}
}
@@ -533,7 +533,7 @@ void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
case (WAIT_OBJECT_0 + kLowMemoryNotification):
//short on memory GC immediately
GetFinalizerThread()->DisablePreemptiveGC();
- GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(0, TRUE);
GetFinalizerThread()->EnablePreemptiveGC();
//wait only on the event for 2s
switch (event->Wait(2000, FALSE))
@@ -584,7 +584,7 @@ void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
if (WaitForSingleObject(MHandles[kLowMemoryNotification], 0) == WAIT_OBJECT_0) {
//short on memory GC immediately
GetFinalizerThread()->DisablePreemptiveGC();
- GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(0, TRUE);
GetFinalizerThread()->EnablePreemptiveGC();
}
//wait only on the event for 2s
@@ -604,7 +604,7 @@ void FinalizerThread::WaitForFinalizerEvent (CLREvent *event)
if (sLastLowMemoryFromHost != 0)
{
GetFinalizerThread()->DisablePreemptiveGC();
- GCHeap::GetGCHeap()->GarbageCollect(0, TRUE);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(0, TRUE);
GetFinalizerThread()->EnablePreemptiveGC();
}
}
@@ -677,7 +677,7 @@ VOID FinalizerThread::FinalizerThreadWorker(void *args)
{
s_forcedGCInProgress = true;
GetFinalizerThread()->DisablePreemptiveGC();
- GCHeap::GetGCHeap()->GarbageCollect(2, FALSE, collection_blocking);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(2, FALSE, collection_blocking);
GetFinalizerThread()->EnablePreemptiveGC();
s_forcedGCInProgress = false;
@@ -710,14 +710,14 @@ VOID FinalizerThread::FinalizerThreadWorker(void *args)
do
{
- last_gc_count = GCHeap::GetGCHeap()->CollectionCount(0);
+ last_gc_count = GCHeapUtilities::GetGCHeap()->CollectionCount(0);
GetFinalizerThread()->m_GCOnTransitionsOK = FALSE;
GetFinalizerThread()->EnablePreemptiveGC();
__SwitchToThread (0, ++dwSwitchCount);
GetFinalizerThread()->DisablePreemptiveGC();
// If no GCs happened, then we assume we are quiescent
GetFinalizerThread()->m_GCOnTransitionsOK = TRUE;
- } while (GCHeap::GetGCHeap()->CollectionCount(0) - last_gc_count > 0);
+ } while (GCHeapUtilities::GetGCHeap()->CollectionCount(0) - last_gc_count > 0);
}
#endif //_DEBUG
@@ -747,7 +747,7 @@ VOID FinalizerThread::FinalizerThreadWorker(void *args)
}
else if (UnloadingAppDomain == NULL)
break;
- else if (!GCHeap::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, fRunFinalizersOnUnload))
+ else if (!GCHeapUtilities::GetGCHeap()->FinalizeAppDomain(UnloadingAppDomain, fRunFinalizersOnUnload))
{
break;
}
@@ -916,7 +916,7 @@ DWORD __stdcall FinalizerThread::FinalizerThreadStart(void *args)
if (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_FinalizeOnShutdown) != 0)
{
// Finalize all registered objects during shutdown, even they are still reachable.
- GCHeap::GetGCHeap()->SetFinalizeQueueForShutdown(FALSE);
+ GCHeapUtilities::GetGCHeap()->SetFinalizeQueueForShutdown(FALSE);
// This will apply any policy for swallowing exceptions during normal
// processing, without allowing the finalizer thread to disappear on us.
@@ -1380,7 +1380,7 @@ BOOL FinalizerThread::FinalizerThreadWatchDogHelper()
}
else
{
- prevCount = GCHeap::GetGCHeap()->GetNumberOfFinalizable();
+ prevCount = GCHeapUtilities::GetGCHeap()->GetNumberOfFinalizable();
}
DWORD maxTry = (DWORD)(totalWaitTimeout*1.0/FINALIZER_WAIT_TIMEOUT + 0.5);
@@ -1447,11 +1447,11 @@ BOOL FinalizerThread::FinalizerThreadWatchDogHelper()
}
else
{
- curCount = GCHeap::GetGCHeap()->GetNumberOfFinalizable();
+ curCount = GCHeapUtilities::GetGCHeap()->GetNumberOfFinalizable();
}
if ((prevCount <= curCount)
- && !GCHeap::GetGCHeap()->ShouldRestartFinalizerWatchDog()
+ && !GCHeapUtilities::GetGCHeap()->ShouldRestartFinalizerWatchDog()
&& (pThread == NULL || !(pThread->m_State & (Thread::TS_UserSuspendPending | Thread::TS_DebugSuspendPending)))){
if (nTry == maxTry) {
if (!s_fRaiseExitProcessEvent) {
diff --git a/src/vm/frames.cpp b/src/vm/frames.cpp
index ec7e7be63c..04a1815cf3 100644
--- a/src/vm/frames.cpp
+++ b/src/vm/frames.cpp
@@ -18,7 +18,7 @@
#include "fieldmarshaler.h"
#include "objecthandle.h"
#include "siginfo.hpp"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "dllimportcallback.h"
#include "stackwalk.h"
#include "dbginterface.h"
diff --git a/src/vm/frames.h b/src/vm/frames.h
index 0926f29cea..91ab3c3e5f 100644
--- a/src/vm/frames.h
+++ b/src/vm/frames.h
@@ -113,8 +113,10 @@
// | +-ComPrestubMethodFrame - prestub frame for calls from COM to CLR
// |
#endif //FEATURE_COMINTEROP
+#ifdef _TARGET_X86_
// | +-UMThkCallFrame - this frame represents an unmanaged->managed
// | transition through N/Direct
+#endif
// |
// +-ContextTransitionFrame - this frame is used to mark an appdomain transition
// |
@@ -1905,7 +1907,7 @@ class UnmanagedToManagedFrame : public Frame
{
friend class CheckAsmOffsets;
- VPTR_ABSTRACT_VTABLE_CLASS(UnmanagedToManagedFrame, Frame)
+ VPTR_ABSTRACT_VTABLE_CLASS_AND_CTOR(UnmanagedToManagedFrame, Frame)
public:
@@ -2897,7 +2899,7 @@ typedef DPTR(class UMThunkMarshInfo) PTR_UMThunkMarshInfo;
class UMEntryThunk;
typedef DPTR(class UMEntryThunk) PTR_UMEntryThunk;
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_)
//------------------------------------------------------------------------
// This frame guards an unmanaged->managed transition thru a UMThk
//------------------------------------------------------------------------
@@ -2925,7 +2927,7 @@ protected:
// Keep as last entry in class
DEFINE_VTABLE_GETTER_AND_CTOR_AND_DTOR(UMThkCallFrame)
};
-#endif // _TARGET_X86_
+#endif // _TARGET_X86_
#if defined(_TARGET_X86_)
//-------------------------------------------------------------------------
diff --git a/src/vm/frameworkexceptionloader.cpp b/src/vm/frameworkexceptionloader.cpp
index 7d01f82983..a33010e163 100644
--- a/src/vm/frameworkexceptionloader.cpp
+++ b/src/vm/frameworkexceptionloader.cpp
@@ -74,7 +74,7 @@ MethodTable* FrameworkExceptionLoader::GetException(RuntimeExceptionKind kind)
{
Exception *ex = GET_EXCEPTION();
- // Let non-file-not-found execeptions propagate
+ // Let non-file-not-found exceptions propagate
if (EEFileLoadException::GetFileLoadKind(ex->GetHR()) != kFileNotFoundException)
EX_RETHROW;
diff --git a/src/vm/gccover.cpp b/src/vm/gccover.cpp
index 41dc094e94..d5e7b60d1a 100644
--- a/src/vm/gccover.cpp
+++ b/src/vm/gccover.cpp
@@ -80,7 +80,7 @@ void SetupAndSprinkleBreakpoints(
gcCover->methodRegion = methodRegionInfo;
gcCover->codeMan = pCodeInfo->GetCodeManager();
- gcCover->gcInfoToken = pCodeInfo->GetGCInfoToken();
+ gcCover->gcInfoToken = pCodeInfo->GetGCInfoToken();
gcCover->callerThread = 0;
gcCover->doingEpilogChecks = true;
@@ -583,7 +583,7 @@ void GCCoverageInfo::SprinkleBreakpoints(
#ifdef _TARGET_X86_
// we will whack every instruction in the prolog and epilog to make certain
// our unwinding logic works there.
- if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken.Info, NULL)) {
+ if (codeMan->IsInPrologOrEpilog((cur - codeStart) + (DWORD)regionOffsetAdj, gcInfoToken, NULL)) {
*cur = INTERRUPT_INSTR;
}
#endif
@@ -1234,8 +1234,8 @@ void checkAndUpdateReg(DWORD& origVal, DWORD curVal, bool gcHappened) {
// the validation infrastructure has got a bug.
_ASSERTE(gcHappened); // If the register values are different, a GC must have happened
- _ASSERTE(GCHeap::GetGCHeap()->IsHeapPointer((BYTE*) size_t(origVal))); // And the pointers involved are on the GCHeap
- _ASSERTE(GCHeap::GetGCHeap()->IsHeapPointer((BYTE*) size_t(curVal)));
+ _ASSERTE(GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE*) size_t(origVal))); // And the pointers involved are on the GCHeap
+ _ASSERTE(GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE*) size_t(curVal)));
origVal = curVal; // this is now the best estimate of what should be returned.
}
@@ -1478,7 +1478,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
if (gcCover->callerThread == 0) {
if (FastInterlockCompareExchangePointer(&gcCover->callerThread, pThread, 0) == 0) {
gcCover->callerRegs = *regs;
- gcCover->gcCount = GCHeap::GetGCHeap()->GetGcCount();
+ gcCover->gcCount = GCHeapUtilities::GetGCHeap()->GetGcCount();
bShouldUpdateProlog = false;
}
}
@@ -1527,7 +1527,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
/* are we in a prolog or epilog? If so just test the unwind logic
but don't actually do a GC since the prolog and epilog are not
GC safe points */
- if (gcCover->codeMan->IsInPrologOrEpilog(offset, gcCover->gcInfoToken.Info, NULL))
+ if (gcCover->codeMan->IsInPrologOrEpilog(offset, gcCover->gcInfoToken, NULL))
{
// We are not at a GC safe point so we can't Suspend EE (Suspend EE will yield to GC).
// But we still have to update the GC Stress instruction. We do it directly without suspending
@@ -1564,13 +1564,13 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// instruction in the epilog (TODO: fix it for the first instr Case)
_ASSERTE(pThread->PreemptiveGCDisabled()); // Epilogs should be in cooperative mode, no GC can happen right now.
- bool gcHappened = gcCover->gcCount != GCHeap::GetGCHeap()->GetGcCount();
+ bool gcHappened = gcCover->gcCount != GCHeapUtilities::GetGCHeap()->GetGcCount();
checkAndUpdateReg(gcCover->callerRegs.Edi, *regDisp.pEdi, gcHappened);
checkAndUpdateReg(gcCover->callerRegs.Esi, *regDisp.pEsi, gcHappened);
checkAndUpdateReg(gcCover->callerRegs.Ebx, *regDisp.pEbx, gcHappened);
checkAndUpdateReg(gcCover->callerRegs.Ebp, *regDisp.pEbp, gcHappened);
- gcCover->gcCount = GCHeap::GetGCHeap()->GetGcCount();
+ gcCover->gcCount = GCHeapUtilities::GetGCHeap()->GetGcCount();
}
return;
@@ -1689,24 +1689,6 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
bool enableWhenDone = false;
if (!pThread->PreemptiveGCDisabled())
{
-#ifdef _TARGET_X86_
- // We are in preemtive mode in JITTed code. currently this can only
- // happen in a couple of instructions when we have an inlined PINVOKE
- // method.
-
- // Better be a CALL (direct or indirect),
- // or a MOV instruction (three flavors),
- // or pop ECX or add ESP xx (for cdecl pops, two flavors)
- // or cmp, je (for the PINVOKE ESP checks)
- // or lea (for PInvoke stack resilience)
- if (!(instrVal == 0xE8 || instrVal == 0xFF ||
- instrVal == 0x89 || instrVal == 0x8B || instrVal == 0xC6 ||
- instrVal == 0x59 || instrVal == 0x81 || instrVal == 0x83 ||
- instrVal == 0x3B || instrVal == 0x74 || instrVal == 0x8D))
- {
- _ASSERTE(!"Unexpected instruction in preemtive JITTED code");
- }
-#endif // _TARGET_X86_
pThread->DisablePreemptiveGC();
enableWhenDone = true;
}
@@ -1777,7 +1759,7 @@ void DoGcStress (PCONTEXT regs, MethodDesc *pMD)
// Do the actual stress work
//
- if (!GCHeap::GetGCHeap()->StressHeap())
+ if (!GCHeapUtilities::GetGCHeap()->StressHeap())
UpdateGCStressInstructionWithoutGC ();
// Must flush instruction cache before returning as instruction has been modified.
diff --git a/src/vm/gcenv.ee.cpp b/src/vm/gcenv.ee.cpp
index 2f1e4e8200..3be8384b3d 100644
--- a/src/vm/gcenv.ee.cpp
+++ b/src/vm/gcenv.ee.cpp
@@ -15,6 +15,12 @@
#include "gcenv.h"
+#ifdef FEATURE_STANDALONE_GC
+#include "gcenv.ee.h"
+#else
+#include "../gc/env/gcenv.ee.h"
+#endif // FEATURE_STANDALONE_GC
+
#include "threadsuspend.h"
#ifdef FEATURE_COMINTEROP
@@ -125,7 +131,7 @@ inline bool SafeToReportGenericParamContext(CrawlFrame* pCF)
#ifndef USE_GC_INFO_DECODER
ICodeManager * pEECM = pCF->GetCodeManager();
- if (pEECM != NULL && pEECM->IsInPrologOrEpilog(pCF->GetRelOffset(), pCF->GetGCInfo(), NULL))
+ if (pEECM != NULL && pEECM->IsInPrologOrEpilog(pCF->GetRelOffset(), pCF->GetGCInfoToken(), NULL))
{
return false;
}
@@ -550,7 +556,7 @@ void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen,
STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n", sc->promotion);
// In server GC, we should be competing for marking the statics
- if (GCHeap::MarkShouldCompeteForStatics())
+ if (GCHeapUtilities::MarkShouldCompeteForStatics())
{
if (condemned == max_gen && sc->promotion)
{
@@ -563,7 +569,7 @@ void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen,
{
STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n", pThread, pThread->GetThreadId());
- if (GCHeap::GetGCHeap()->IsThreadUsingAllocationContextHeap(
+ if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap(
GCToEEInterface::GetAllocContext(pThread), sc->thread_number))
{
sc->thread_under_crawl = pThread;
@@ -693,7 +699,7 @@ void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen)
SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen);
}
-alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
+gc_alloc_context * GCToEEInterface::GetAllocContext(Thread * pThread)
{
WRAPPER_NO_CONTRACT;
return pThread->GetAllocContext();
@@ -839,3 +845,426 @@ Thread* GCToEEInterface::CreateBackgroundThread(GCBackgroundThreadFunction threa
threadStubArgs.thread->DecExternalCount(FALSE);
return NULL;
}
+
+//
+// Diagnostics code
+//
+
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+inline BOOL ShouldTrackMovementForProfilerOrEtw()
+{
+#ifdef GC_PROFILING
+ if (CORProfilerTrackGC())
+ return true;
+#endif
+
+#ifdef FEATURE_EVENT_TRACE
+ if (ETW::GCLog::ShouldTrackMovementForEtw())
+ return true;
+#endif
+
+ return false;
+}
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+
+void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags)
+{
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ Object *pObj = *ppObject;
+ if (dwFlags & GC_CALL_INTERIOR)
+ {
+ pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj);
+ }
+ ScanRootsHelper(pObj, ppObject, pSC, dwFlags);
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
+
+// TODO - at some point we would like to completely decouple profiling
+// from ETW tracing using a pattern similar to this, where the
+// ProfilingScanContext has flags about whether or not certain things
+// should be tracked, and each one of these ProfilerShouldXYZ functions
+// will check these flags and determine what to do based upon that.
+// GCProfileWalkHeapWorker can, in turn, call those methods without fear
+// of things being ifdef'd out.
+
+// Returns TRUE if GC profiling is enabled and the profiler
+// should scan dependent handles, FALSE otherwise.
+BOOL ProfilerShouldTrackConditionalWeakTableElements()
+{
+#if defined(GC_PROFILING)
+ return CORProfilerTrackConditionalWeakTableElements();
+#else
+ return FALSE;
+#endif // defined (GC_PROFILING)
+}
+
+// If GC profiling is enabled, informs the profiler that we are done
+// tracing dependent handles.
+void ProfilerEndConditionalWeakTableElementReferences(void* heapId)
+{
+#if defined (GC_PROFILING)
+ g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId);
+#else
+ UNREFERENCED_PARAMETER(heapId);
+#endif // defined (GC_PROFILING)
+}
+
+// If GC profiling is enabled, informs the profiler that we are done
+// tracing root references.
+void ProfilerEndRootReferences2(void* heapId)
+{
+#if defined (GC_PROFILING)
+ g_profControlBlock.pProfInterface->EndRootReferences2(heapId);
+#else
+ UNREFERENCED_PARAMETER(heapId);
+#endif // defined (GC_PROFILING)
+}
+
+void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc)
+{
+ Thread* pThread = NULL;
+ while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL)
+ {
+ sc->thread_under_crawl = pThread;
+#ifdef FEATURE_EVENT_TRACE
+ sc->dwEtwRootKind = kEtwGCRootKindStack;
+#endif // FEATURE_EVENT_TRACE
+ ScanStackRoots(pThread, fn, sc);
+#ifdef FEATURE_EVENT_TRACE
+ sc->dwEtwRootKind = kEtwGCRootKindOther;
+#endif // FEATURE_EVENT_TRACE
+ }
+}
+
+void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, BOOL isDependent)
+{
+ ProfilingScanContext* pSC = (ProfilingScanContext*)context;
+
+#ifdef GC_PROFILING
+ // Give the profiler the objectref.
+ if (pSC->fProfilerPinned)
+ {
+ if (!isDependent)
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackGC());
+ g_profControlBlock.pProfInterface->RootReference2(
+ (uint8_t *)*pRef,
+ kEtwGCRootKindHandle,
+ (EtwGCRootFlags)flags,
+ pRef,
+ &pSC->pHeapId);
+ END_PIN_PROFILER();
+ }
+ else
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements());
+ g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference(
+ (uint8_t*)*pRef,
+ (uint8_t*)pSec,
+ pRef,
+ &pSC->pHeapId);
+ END_PIN_PROFILER();
+ }
+ }
+#endif // GC_PROFILING
+
+#if defined(FEATURE_EVENT_TRACE)
+ // Notify ETW of the handle
+ if (ETW::GCLog::ShouldWalkHeapRootsForEtw())
+ {
+ ETW::GCLog::RootReference(
+ pRef,
+ *pRef, // object being rooted
+ pSec, // pSecondaryNodeForDependentHandle
+ isDependent,
+ pSC,
+ 0, // dwGCFlags,
+ flags); // ETW handle flags
+ }
+#endif // defined(FEATURE_EVENT_TRACE)
+}
+
+// This is called only if we've determined that either:
+// a) The Profiling API wants to do a walk of the heap, and it has pinned the
+// profiler in place (so it cannot be detached), and it's thus safe to call into the
+// profiler, OR
+// b) ETW infrastructure wants to do a walk of the heap either to log roots,
+// objects, or both.
+// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
+// ETW can ask for roots but not objects (or vice versa), each of the walks below is
+// gated on its own flag.
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw)
+{
+ {
+ ProfilingScanContext SC(fProfilerPinned);
+
+ // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them.
+ if (fProfilerPinned || fShouldWalkHeapRootsForEtw)
+ {
+ GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC);
+ SC.dwEtwRootKind = kEtwGCRootKindFinalizer;
+ GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC);
+
+ // Handles are kept independent of wks/svr/concurrent builds
+ SC.dwEtwRootKind = kEtwGCRootKindHandle;
+ GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
+
+ // indicate that regular handle scanning is over, so we can flush the buffered roots
+ // to the profiler. (This is for profapi only. ETW will flush after the
+ // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
+ if (fProfilerPinned)
+ {
+ ProfilerEndRootReferences2(&SC.pHeapId);
+ }
+ }
+
+ // **** Scan dependent handles: only if the profiler supports it or ETW wants roots
+ if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) ||
+ fShouldWalkHeapRootsForEtw)
+ {
+ // GcScanDependentHandlesForProfiler double-checks
+ // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler
+
+ ProfilingScanContext* pSC = &SC;
+
+ // we'll re-use pHeapId (which was either unused (0) or freed by EndRootReferences2
+ // (-1)), so reset it to NULL
+ _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) ||
+ (*((size_t *)(&pSC->pHeapId)) == (size_t)(0)));
+ pSC->pHeapId = NULL;
+
+ GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC);
+
+ // indicate that dependent handle scanning is over, so we can flush the buffered roots
+ // to the profiler. (This is for profapi only. ETW will flush after the
+ // entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
+ if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements())
+ {
+ ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId);
+ }
+ }
+
+ ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext);
+
+ // **** Walk objects on heap: only if profiling API wants them or ETW wants them.
+ if (fProfilerPinned || fShouldWalkHeapObjectsForEtw)
+ {
+ GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, TRUE /* walk the large object heap */);
+ }
+
+#ifdef FEATURE_EVENT_TRACE
+ // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers
+ // should be flushed into the ETW stream
+ if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw)
+ {
+ ETW::GCLog::EndHeapDump(&profilerWalkHeapContext);
+ }
+#endif // FEATURE_EVENT_TRACE
+ }
+}
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+
+void GCProfileWalkHeap()
+{
+ BOOL fWalkedHeapForProfiler = FALSE;
+
+#ifdef FEATURE_EVENT_TRACE
+ if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw())
+ ETW::GCLog::WalkStaticsAndCOMForETW();
+
+ BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw();
+ BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw();
+#else // !FEATURE_EVENT_TRACE
+ BOOL fShouldWalkHeapRootsForEtw = FALSE;
+ BOOL fShouldWalkHeapObjectsForEtw = FALSE;
+#endif // FEATURE_EVENT_TRACE
+
+#if defined (GC_PROFILING)
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackGC());
+ GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
+ fWalkedHeapForProfiler = TRUE;
+ END_PIN_PROFILER();
+ }
+#endif // defined (GC_PROFILING)
+
+#if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE
+ // is defined, since both of them make use of the walk heap worker.
+ if (!fWalkedHeapForProfiler &&
+ (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw))
+ {
+ GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw);
+ }
+#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+}
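
A compressed view of the dispatch above (an illustrative sketch, not code from this change): the heap is walked at most once per GC, and fProfilerPinned records on whose behalf the walk runs.

    static void WalkDispatchSketch(bool profilerPinned, bool etwRoots, bool etwObjects)
    {
        if (profilerPinned)
        {
            // Profiler attached and pinned: one walk serves profapi and any ETW requests.
            GCProfileWalkHeapWorker(TRUE, etwRoots, etwObjects);
        }
        else if (etwRoots || etwObjects)
        {
            // ETW-only: the same worker runs, with profiler callbacks suppressed.
            GCProfileWalkHeapWorker(FALSE, etwRoots, etwObjects);
        }
    }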
+
+void WalkFReachableObjects(BOOL isCritical, void* objectID)
+{
+ g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID);
+}
+
+static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects;
+
+void GCToEEInterface::DiagGCStart(int gen, bool isInduced)
+{
+#ifdef GC_PROFILING
+ DiagUpdateGenerationBounds();
+ GarbageCollectionStartedCallback(gen, isInduced);
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackGC());
+ size_t context = 0;
+
+        // When walking objects allocated by class, we don't want to walk the large
+        // object heap, because that would count objects that may have been around for a while.
+ GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, FALSE);
+
+ // Notify that we've reached the end of the Gen 0 scan
+ g_profControlBlock.pProfInterface->EndAllocByClass(&context);
+ END_PIN_PROFILER();
+ }
+
+#endif // GC_PROFILING
+}
+
+void GCToEEInterface::DiagUpdateGenerationBounds()
+{
+#ifdef GC_PROFILING
+ if (CORProfilerTrackGC())
+ UpdateGenerationBounds();
+#endif // GC_PROFILING
+}
+
+void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent)
+{
+#ifdef GC_PROFILING
+ if (!fConcurrent)
+ {
+ GCProfileWalkHeap();
+ DiagUpdateGenerationBounds();
+ GarbageCollectionFinishedCallback();
+ }
+#endif // GC_PROFILING
+}
+
+void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext)
+{
+#ifdef GC_PROFILING
+ if (CORProfilerTrackGC())
+ {
+ BEGIN_PIN_PROFILER(CORProfilerPresent());
+ GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn);
+ END_PIN_PROFILER();
+ }
+#endif //GC_PROFILING
+}
+
+// Note on last parameter: when calling this for bgc, only ETW
+// should be sending these events so that existing profapi profilers
+// don't get confused.
+void WalkMovedReferences(uint8_t* begin, uint8_t* end,
+ ptrdiff_t reloc,
+ size_t context,
+ BOOL fCompacting,
+ BOOL fBGC)
+{
+ ETW::GCLog::MovedReference(begin, end,
+ (fCompacting ? reloc : 0),
+ context,
+ fCompacting,
+ !fBGC);
+}
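
The last argument to MovedReference is presumably an allow-profapi-notification flag: passing !fBGC keeps background-GC move events ETW-only, which is exactly what the note above about not confusing existing profapi profilers calls for.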
+
+void GCToEEInterface::DiagWalkSurvivors(void* gcContext)
+{
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ if (ShouldTrackMovementForProfilerOrEtw())
+ {
+ size_t context = 0;
+ ETW::GCLog::BeginMovedReferences(&context);
+ GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_gc);
+ ETW::GCLog::EndMovedReferences(context);
+ }
+#endif //GC_PROFILING || FEATURE_EVENT_TRACE
+}
+
+void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext)
+{
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ if (ShouldTrackMovementForProfilerOrEtw())
+ {
+ size_t context = 0;
+ ETW::GCLog::BeginMovedReferences(&context);
+ GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_loh);
+ ETW::GCLog::EndMovedReferences(context);
+ }
+#endif //GC_PROFILING || FEATURE_EVENT_TRACE
+}
+
+void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext)
+{
+#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
+ if (ShouldTrackMovementForProfilerOrEtw())
+ {
+ size_t context = 0;
+ ETW::GCLog::BeginMovedReferences(&context);
+ GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, context, walk_for_bgc);
+ ETW::GCLog::EndMovedReferences(context);
+ }
+#endif //GC_PROFILING || FEATURE_EVENT_TRACE
+}
+
+void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args)
+{
+ assert(args != nullptr);
+ switch (args->operation)
+ {
+ case WriteBarrierOp::StompResize:
+ // StompResize requires a new card table, a new lowest address, and
+ // a new highest address
+ assert(args->card_table != nullptr);
+ assert(args->lowest_address != nullptr);
+ assert(args->highest_address != nullptr);
+ g_card_table = args->card_table;
+ ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check);
+
+ // We need to make sure that other threads executing checked write barriers
+ // will see the g_card_table update before g_lowest/highest_address updates.
+ // Otherwise, the checked write barrier may AV accessing the old card table
+ // with address that it does not cover. Write barriers access card table
+ // without memory barriers for performance reasons, so we need to flush
+ // the store buffers here.
+ FlushProcessWriteBuffers();
+
+ g_lowest_address = args->lowest_address;
+ VolatileStore(&g_highest_address, args->highest_address);
+ return;
+ case WriteBarrierOp::StompEphemeral:
+ // StompEphemeral requires a new ephemeral low and a new ephemeral high
+ assert(args->ephemeral_lo != nullptr);
+ assert(args->ephemeral_hi != nullptr);
+ g_ephemeral_low = args->ephemeral_lo;
+ g_ephemeral_high = args->ephemeral_hi;
+ ::StompWriteBarrierEphemeral(args->is_runtime_suspended);
+ return;
+ case WriteBarrierOp::Initialize:
+ // This operation should only be invoked once, upon initialization.
+ assert(g_card_table == nullptr);
+ assert(g_lowest_address == nullptr);
+ assert(g_highest_address == nullptr);
+ assert(args->card_table != nullptr);
+ assert(args->lowest_address != nullptr);
+ assert(args->highest_address != nullptr);
+ assert(args->is_runtime_suspended && "the runtime must be suspended here!");
+ assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!");
+
+ g_card_table = args->card_table;
+ FlushProcessWriteBuffers();
+ g_lowest_address = args->lowest_address;
+ VolatileStore(&g_highest_address, args->highest_address);
+ ::StompWriteBarrierResize(true, false);
+ return;
+ default:
+ assert(!"unknown WriteBarrierOp enum");
+ }
+}
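
A minimal sketch of why that flush matters, assuming a checked write barrier that reads these globals with no fences (the real barrier is JIT-emitted assembly, and the card granularity below is illustrative):

    static void CheckedWriteBarrierSketch(Object** dst, Object* ref)
    {
        *dst = ref;
        // The bounds are read first, without a memory barrier...
        if ((uint8_t*)dst >= g_lowest_address && (uint8_t*)dst < g_highest_address)
        {
            // ...then the card table is indexed. If a resize published the wider
            // bounds before the new card table pointer, this thread could pass the
            // bounds check while still holding the old, smaller table and index
            // past its end. Publishing g_card_table first and then flushing the
            // store buffers closes that window.
            g_card_table[((uint8_t*)dst - g_lowest_address) >> 11] = 0xFF;
        }
    }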
diff --git a/src/vm/gcenv.ee.h b/src/vm/gcenv.ee.h
index 1d6c9bf78b..f4312217ec 100644
--- a/src/vm/gcenv.ee.h
+++ b/src/vm/gcenv.ee.h
@@ -2,4 +2,48 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-#include "../gc/env/gcenv.ee.h"
+#ifndef _GCENV_EE_H_
+#define _GCENV_EE_H_
+
+#include "gcinterface.h"
+
+#ifdef FEATURE_STANDALONE_GC
+
+class GCToEEInterface : public IGCToCLR {
+public:
+ GCToEEInterface() = default;
+ ~GCToEEInterface() = default;
+
+ void SuspendEE(SUSPEND_REASON reason);
+ void RestartEE(bool bFinishedGC);
+ void GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc);
+ void GcStartWork(int condemned, int max_gen);
+ void AfterGcScanRoots(int condemned, int max_gen, ScanContext* sc);
+ void GcBeforeBGCSweepWork();
+ void GcDone(int condemned);
+ bool RefCountedHandleCallbacks(Object * pObject);
+ void SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2);
+ void SyncBlockCacheDemote(int max_gen);
+ void SyncBlockCachePromotionsGranted(int max_gen);
+ bool IsPreemptiveGCDisabled(Thread * pThread);
+ void EnablePreemptiveGC(Thread * pThread);
+ void DisablePreemptiveGC(Thread * pThread);
+ gc_alloc_context * GetAllocContext(Thread * pThread);
+ bool CatchAtSafePoint(Thread * pThread);
+ void GcEnumAllocContexts(enum_alloc_context_func* fn, void* param);
+ Thread* CreateBackgroundThread(GCBackgroundThreadFunction threadStart, void* arg);
+
+ // Diagnostics methods.
+ void DiagGCStart(int gen, bool isInduced);
+ void DiagUpdateGenerationBounds();
+ void DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent);
+ void DiagWalkFReachableObjects(void* gcContext);
+ void DiagWalkSurvivors(void* gcContext);
+ void DiagWalkLOHSurvivors(void* gcContext);
+ void DiagWalkBGCSurvivors(void* gcContext);
+ void StompWriteBarrier(WriteBarrierParameters* args);
+};
+
+#endif // FEATURE_STANDALONE_GC
+
+#endif // _GCENV_EE_H_
\ No newline at end of file
diff --git a/src/vm/gcenv.h b/src/vm/gcenv.h
index 08dcc711ae..ad5baa262e 100644
--- a/src/vm/gcenv.h
+++ b/src/vm/gcenv.h
@@ -48,8 +48,6 @@
#include "util.hpp"
-#include "gcenv.ee.h"
-#include "gcenv.os.h"
#include "gcenv.interlocked.h"
#include "gcenv.interlocked.inl"
diff --git a/src/vm/gcenv.os.cpp b/src/vm/gcenv.os.cpp
index 73b21a7a0b..52789b835c 100644
--- a/src/vm/gcenv.os.cpp
+++ b/src/vm/gcenv.os.cpp
@@ -18,6 +18,12 @@
#include <Psapi.h>
#endif
+#ifdef Sleep
+#undef Sleep
+#endif // Sleep
+
+#include "env/gcenv.os.h"
+
#define MAX_PTR ((uint8_t*)(~(ptrdiff_t)0))
// Initialize the interface implementation
@@ -160,7 +166,7 @@ void GCToOSInterface::YieldThread(uint32_t switchCount)
// flags - flags to control special settings like write watching
// Return:
// Starting virtual address of the reserved range
-void* GCToOSInterface::VirtualReserve(void* address, size_t size, size_t alignment, uint32_t flags)
+void* GCToOSInterface::VirtualReserve(size_t size, size_t alignment, uint32_t flags)
{
LIMITED_METHOD_CONTRACT;
@@ -249,7 +255,7 @@ bool GCToOSInterface::SupportsWriteWatch()
// check if the OS supports write-watch.
// Drawbridge does not support write-watch so we still need to do the runtime detection for them.
// Otherwise, all currently supported OSes do support write-watch.
- void* mem = VirtualReserve (0, g_SystemInfo.dwAllocationGranularity, 0, VirtualReserveFlags::WriteWatch);
+ void* mem = VirtualReserve (g_SystemInfo.dwAllocationGranularity, 0, VirtualReserveFlags::WriteWatch);
if (mem != NULL)
{
VirtualRelease (mem, g_SystemInfo.dwAllocationGranularity);
@@ -364,23 +370,6 @@ static size_t g_RestrictedPhysicalMemoryLimit = (size_t)MAX_PTR;
typedef BOOL (WINAPI *PIS_PROCESS_IN_JOB)(HANDLE processHandle, HANDLE jobHandle, BOOL* result);
typedef BOOL (WINAPI *PQUERY_INFORMATION_JOB_OBJECT)(HANDLE jobHandle, JOBOBJECTINFOCLASS jobObjectInfoClass, void* lpJobObjectInfo, DWORD cbJobObjectInfoLength, LPDWORD lpReturnLength);
-#ifdef FEATURE_CORECLR
-// For coresys we need to look for an API in some apiset dll on win8 if we can't find it
-// in the traditional dll.
-HINSTANCE LoadDllForAPI(WCHAR* dllTraditional, WCHAR* dllApiSet)
-{
- HINSTANCE hinst = WszLoadLibrary(dllTraditional);
-
- if (!hinst)
- {
- if(RunningOnWin8())
- hinst = WszLoadLibrary(dllApiSet);
- }
-
- return hinst;
-}
-#endif
-
static size_t GetRestrictedPhysicalMemoryLimit()
{
LIMITED_METHOD_CONTRACT;
@@ -392,10 +381,7 @@ static size_t GetRestrictedPhysicalMemoryLimit()
size_t job_physical_memory_limit = (size_t)MAX_PTR;
BOOL in_job_p = FALSE;
#ifdef FEATURE_CORECLR
- HINSTANCE hinstApiSetPsapiOrKernel32 = 0;
- // these 2 modules will need to be freed no matter what as we only use them locally in this method.
- HINSTANCE hinstApiSetJob1OrKernel32 = 0;
- HINSTANCE hinstApiSetJob2OrKernel32 = 0;
+ HINSTANCE hinstKernel32 = 0;
#else
HINSTANCE hinstPsapi = 0;
#endif
@@ -403,17 +389,7 @@ static size_t GetRestrictedPhysicalMemoryLimit()
PIS_PROCESS_IN_JOB GCIsProcessInJob = 0;
PQUERY_INFORMATION_JOB_OBJECT GCQueryInformationJobObject = 0;
-#ifdef FEATURE_CORECLR
- hinstApiSetJob1OrKernel32 = LoadDllForAPI(L"kernel32.dll", L"api-ms-win-core-job-l1-1-0.dll");
- if (!hinstApiSetJob1OrKernel32)
- goto exit;
-
- GCIsProcessInJob = (PIS_PROCESS_IN_JOB)GetProcAddress(hinstApiSetJob1OrKernel32, "IsProcessInJob");
- if (!GCIsProcessInJob)
- goto exit;
-#else
GCIsProcessInJob = &(::IsProcessInJob);
-#endif
if (!GCIsProcessInJob(GetCurrentProcess(), NULL, &in_job_p))
goto exit;
@@ -421,11 +397,11 @@ static size_t GetRestrictedPhysicalMemoryLimit()
if (in_job_p)
{
#ifdef FEATURE_CORECLR
- hinstApiSetPsapiOrKernel32 = LoadDllForAPI(L"kernel32.dll", L"api-ms-win-core-psapi-l1-1-0");
- if (!hinstApiSetPsapiOrKernel32)
+ hinstKernel32 = WszLoadLibrary(L"kernel32.dll");
+ if (!hinstKernel32)
goto exit;
- GCGetProcessMemoryInfo = (PGET_PROCESS_MEMORY_INFO)GetProcAddress(hinstApiSetPsapiOrKernel32, "K32GetProcessMemoryInfo");
+ GCGetProcessMemoryInfo = (PGET_PROCESS_MEMORY_INFO)GetProcAddress(hinstKernel32, "K32GetProcessMemoryInfo");
#else
// We need a way to get the working set in a job object and GetProcessMemoryInfo
// is the way to get that. According to MSDN, we should use GetProcessMemoryInfo In order to
@@ -439,15 +415,7 @@ static size_t GetRestrictedPhysicalMemoryLimit()
if (!GCGetProcessMemoryInfo)
goto exit;
-#ifdef FEATURE_CORECLR
- hinstApiSetJob2OrKernel32 = LoadDllForAPI(L"kernel32.dll", L"api-ms-win-core-job-l2-1-0");
- if (!hinstApiSetJob2OrKernel32)
- goto exit;
-
- GCQueryInformationJobObject = (PQUERY_INFORMATION_JOB_OBJECT)GetProcAddress(hinstApiSetJob2OrKernel32, "QueryInformationJobObject");
-#else
GCQueryInformationJobObject = &(::QueryInformationJobObject);
-#endif
if (!GCQueryInformationJobObject)
goto exit;
@@ -490,19 +458,12 @@ static size_t GetRestrictedPhysicalMemoryLimit()
}
exit:
-#ifdef FEATURE_CORECLR
- if (hinstApiSetJob1OrKernel32)
- FreeLibrary(hinstApiSetJob1OrKernel32);
- if (hinstApiSetJob2OrKernel32)
- FreeLibrary(hinstApiSetJob2OrKernel32);
-#endif
-
if (job_physical_memory_limit == (size_t)MAX_PTR)
{
job_physical_memory_limit = 0;
#ifdef FEATURE_CORECLR
- FreeLibrary(hinstApiSetPsapiOrKernel32);
+ FreeLibrary(hinstKernel32);
#else
FreeLibrary(hinstPsapi);
#endif
@@ -633,7 +594,7 @@ struct GCThreadStubParam
};
// GC thread stub to convert GC thread function to an OS specific thread function
-static DWORD GCThreadStub(void* param)
+static DWORD __stdcall GCThreadStub(void* param)
{
WRAPPER_NO_CONTRACT;
diff --git a/src/vm/gcheaputilities.cpp b/src/vm/gcheaputilities.cpp
new file mode 100644
index 0000000000..ac24fa34ce
--- /dev/null
+++ b/src/vm/gcheaputilities.cpp
@@ -0,0 +1,19 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+#include "gcheaputilities.h"
+
+// These globals are variables used within the GC and maintained
+// by the EE for use in write barriers. It is the responsibility
+// of the GC to communicate updates to these globals to the EE through
+// GCToEEInterface::StompWriteBarrier.
+GPTR_IMPL_INIT(uint32_t, g_card_table, nullptr);
+GPTR_IMPL_INIT(uint8_t, g_lowest_address, nullptr);
+GPTR_IMPL_INIT(uint8_t, g_highest_address, nullptr);
+uint8_t* g_ephemeral_low = (uint8_t*)1;
+uint8_t* g_ephemeral_high = (uint8_t*)~0;
+
+// This is the global GC heap, maintained by the VM.
+GPTR_IMPL(IGCHeap, g_pGCHeap);
\ No newline at end of file
diff --git a/src/vm/gcheaputilities.h b/src/vm/gcheaputilities.h
new file mode 100644
index 0000000000..e5883fc919
--- /dev/null
+++ b/src/vm/gcheaputilities.h
@@ -0,0 +1,129 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#ifndef _GCHEAPUTILITIES_H_
+#define _GCHEAPUTILITIES_H_
+
+#include "gcinterface.h"
+
+// The singular heap instance.
+GPTR_DECL(IGCHeap, g_pGCHeap);
+
+// GCHeapUtilities provides a number of static methods
+// that operate on the global heap instance. It can't be
+// instantiated.
+class GCHeapUtilities {
+public:
+ // Retrieves the GC heap.
+ inline static IGCHeap* GetGCHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ assert(g_pGCHeap != nullptr);
+ return g_pGCHeap;
+ }
+
+ // Returns true if the heap has been initialized, false otherwise.
+ inline static bool IsGCHeapInitialized()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ return g_pGCHeap != nullptr;
+ }
+
+    // Returns true if the heap is initialized and a garbage collection
+ // is in progress, false otherwise.
+ inline static BOOL IsGCInProgress(BOOL bConsiderGCStart = FALSE)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return (IsGCHeapInitialized() ? GetGCHeap()->IsGCInProgressHelper(bConsiderGCStart) : false);
+ }
+
+    // Returns true if marking threads should compete for scanning statics. This
+ // influences the behavior of `GCToEEInterface::GcScanRoots`.
+ inline static BOOL MarkShouldCompeteForStatics()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IsServerHeap() && g_SystemInfo.dwNumberOfProcessors >= 2;
+ }
+
+ // Waits until a GC is complete, if the heap has been initialized.
+ inline static void WaitForGCCompletion(BOOL bConsiderGCStart = FALSE)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (IsGCHeapInitialized())
+ GetGCHeap()->WaitUntilGCComplete(bConsiderGCStart);
+ }
+
+ // Returns true if we should be using allocation contexts, false otherwise.
+ inline static bool UseAllocationContexts()
+ {
+ WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_REDHAWK
+ // SIMPLIFY: only use allocation contexts
+ return true;
+#else
+#if defined(_TARGET_ARM_) || defined(FEATURE_PAL)
+ return true;
+#else
+ return ((IsServerHeap() ? true : (g_SystemInfo.dwNumberOfProcessors >= 2)));
+#endif
+#endif
+ }
+
+ // Returns true if the held GC heap is a Server GC heap, false otherwise.
+ inline static bool IsServerHeap()
+ {
+ LIMITED_METHOD_CONTRACT;
+#ifdef FEATURE_SVR_GC
+ _ASSERTE(IGCHeap::gcHeapType != IGCHeap::GC_HEAP_INVALID);
+ return (IGCHeap::gcHeapType == IGCHeap::GC_HEAP_SVR);
+#else // FEATURE_SVR_GC
+ return false;
+#endif // FEATURE_SVR_GC
+ }
+
+ // Gets the maximum generation number by reading the static field
+ // on IGCHeap. This should only be done by the DAC code paths - all other code
+ // should go through IGCHeap::GetMaxGeneration.
+ //
+ // The reason for this is that, while we are in the early stages of
+ // decoupling the GC, the GC and the DAC still remain tightly coupled
+ // and, in particular, the DAC needs to know how many generations the GC
+ // has. However, it is not permitted to invoke virtual methods on g_pGCHeap
+ // while on a DAC code path. Therefore, we need to determine the max generation
+ // non-virtually, while still in a manner consistent with the interface -
+ // therefore, a static field is used.
+ //
+ // This is not without precedent - IGCHeap::gcHeapType is a static field used
+ // for a similar reason (the DAC needs to know what kind of heap it's looking at).
+ inline static unsigned GetMaxGeneration()
+ {
+ WRAPPER_NO_CONTRACT;
+
+ return IGCHeap::maxGeneration;
+ }
+
+private:
+ // This class should never be instantiated.
+ GCHeapUtilities() = delete;
+};
+
+#ifndef DACCESS_COMPILE
+extern "C" {
+#endif // !DACCESS_COMPILE
+GPTR_DECL(uint8_t,g_lowest_address);
+GPTR_DECL(uint8_t,g_highest_address);
+GPTR_DECL(uint32_t,g_card_table);
+#ifndef DACCESS_COMPILE
+}
+#endif // !DACCESS_COMPILE
+
+extern "C" uint8_t* g_ephemeral_low;
+extern "C" uint8_t* g_ephemeral_high;
+
+#endif // _GCHEAPUTILITIES_H_
\ No newline at end of file
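
A likely call-site pattern for the new wrapper (assumed usage, not part of this diff):

    // Hypothetical helper: check initialization before touching the heap, since
    // GetGCHeap() asserts a non-null g_pGCHeap.
    void CollectAllGenerationsSketch()
    {
        if (GCHeapUtilities::IsGCHeapInitialized())
        {
            IGCHeap* pHeap = GCHeapUtilities::GetGCHeap();
            pHeap->GarbageCollect((int)GCHeapUtilities::GetMaxGeneration());
        }
    }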
diff --git a/src/vm/gchelpers.cpp b/src/vm/gchelpers.cpp
index bf81847716..20a3a29540 100644
--- a/src/vm/gchelpers.cpp
+++ b/src/vm/gchelpers.cpp
@@ -16,7 +16,7 @@
#include "threads.h"
#include "eetwain.h"
#include "eeconfig.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "corhost.h"
#include "threads.h"
#include "fieldmarshaler.h"
@@ -51,15 +51,50 @@
orObject = (ArrayBase *) OBJECTREFToObject(objref);
-inline alloc_context* GetThreadAllocContext()
+inline gc_alloc_context* GetThreadAllocContext()
{
WRAPPER_NO_CONTRACT;
- assert(GCHeap::UseAllocationContexts());
+ assert(GCHeapUtilities::UseAllocationContexts());
return & GetThread()->m_alloc_context;
}
+// Checks to see if the given allocation size exceeds the
+// largest object size allowed - if it does, it throws
+// an OutOfMemoryException with a message indicating that
+// the OOM was not from memory pressure but from an object
+// being too large.
+inline void CheckObjectSize(size_t alloc_size)
+{
+ CONTRACTL {
+ THROWS;
+ GC_TRIGGERS;
+ } CONTRACTL_END;
+
+ size_t max_object_size;
+#ifdef BIT64
+ if (g_pConfig->GetGCAllowVeryLargeObjects())
+ {
+ max_object_size = (INT64_MAX - 7 - min_obj_size);
+ }
+ else
+#endif // BIT64
+ {
+ max_object_size = (INT32_MAX - 7 - min_obj_size);
+ }
+
+ if (alloc_size >= max_object_size)
+ {
+ if (g_pConfig->IsGCBreakOnOOMEnabled())
+ {
+ DebugBreak();
+ }
+
+ ThrowOutOfMemoryDimensionsExceeded();
+ }
+}
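
For concreteness (assuming the typical 64-bit min_obj_size of 24 bytes): without GCAllowVeryLargeObjects the cap is INT32_MAX - 7 - 24 = 2,147,483,616 bytes, so an allocation approaching 2 GB raises the dimensions-exceeded OOM rather than an ordinary out-of-memory.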
+
// There are only three ways to allocate an object.
// * Call optimized helpers that were generated on the fly. This is how JIT compiled code does most
@@ -98,14 +133,21 @@ inline Object* Alloc(size_t size, BOOL bFinalize, BOOL bContainsPointers )
(bFinalize ? GC_ALLOC_FINALIZE : 0));
Object *retVal = NULL;
+ CheckObjectSize(size);
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- if (GCHeap::UseAllocationContexts())
- retVal = GCHeap::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
+ if (GCHeapUtilities::UseAllocationContexts())
+ retVal = GCHeapUtilities::GetGCHeap()->Alloc(GetThreadAllocContext(), size, flags);
else
- retVal = GCHeap::GetGCHeap()->Alloc(size, flags);
+ retVal = GCHeapUtilities::GetGCHeap()->Alloc(size, flags);
+
+ if (!retVal)
+ {
+ ThrowOutOfMemory();
+ }
+
END_INTERIOR_STACK_PROBE;
return retVal;
}
@@ -126,14 +168,20 @@ inline Object* AllocAlign8(size_t size, BOOL bFinalize, BOOL bContainsPointers,
(bAlignBias ? GC_ALLOC_ALIGN8_BIAS : 0));
Object *retVal = NULL;
+ CheckObjectSize(size);
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- if (GCHeap::UseAllocationContexts())
- retVal = GCHeap::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
+ if (GCHeapUtilities::UseAllocationContexts())
+ retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(GetThreadAllocContext(), size, flags);
else
- retVal = GCHeap::GetGCHeap()->AllocAlign8(size, flags);
+ retVal = GCHeapUtilities::GetGCHeap()->AllocAlign8(size, flags);
+
+ if (!retVal)
+ {
+ ThrowOutOfMemory();
+ }
END_INTERIOR_STACK_PROBE;
return retVal;
@@ -169,11 +217,18 @@ inline Object* AllocLHeap(size_t size, BOOL bFinalize, BOOL bContainsPointers )
(bFinalize ? GC_ALLOC_FINALIZE : 0));
Object *retVal = NULL;
+ CheckObjectSize(size);
// We don't want to throw an SO during the GC, so make sure we have plenty
// of stack before calling in.
INTERIOR_STACK_PROBE_FOR(GetThread(), static_cast<unsigned>(DEFAULT_ENTRY_PROBE_AMOUNT * 1.5));
- retVal = GCHeap::GetGCHeap()->AllocLHeap(size, flags);
+ retVal = GCHeapUtilities::GetGCHeap()->AllocLHeap(size, flags);
+
+ if (!retVal)
+ {
+ ThrowOutOfMemory();
+ }
+
END_INTERIOR_STACK_PROBE;
return retVal;
}
@@ -427,7 +482,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
if (bAllocateInLargeHeap ||
(totalSize >= LARGE_OBJECT_SIZE))
{
- GCHeap::GetGCHeap()->PublishObject((BYTE*)orArray);
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orArray);
}
#ifdef _LOGALLOC
@@ -651,7 +706,7 @@ OBJECTREF FastAllocatePrimitiveArray(MethodTable* pMT, DWORD cElements, BOOL b
if (bPublish)
{
- GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
}
// Notify the profiler of the allocation
@@ -860,7 +915,7 @@ STRINGREF SlowAllocateString( DWORD cchStringLength )
if (ObjectSize >= LARGE_OBJECT_SIZE)
{
- GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
}
// Notify the profiler of the allocation
@@ -951,8 +1006,10 @@ OBJECTREF AllocateObject(MethodTable *pMT
g_IBCLogger.LogMethodTableAccess(pMT);
SetTypeHandleOnThreadForAlloc(TypeHandle(pMT));
+#ifdef FEATURE_CER
if (pMT->HasCriticalFinalizer())
PrepareCriticalFinalizerObject(pMT);
+#endif
#ifdef FEATURE_COMINTEROP
#ifdef FEATURE_COMINTEROP_UNMANAGED_ACTIVATION
@@ -1000,7 +1057,7 @@ OBJECTREF AllocateObject(MethodTable *pMT
if ((baseSize >= LARGE_OBJECT_SIZE))
{
orObject->SetMethodTableForLargeObject(pMT);
- GCHeap::GetGCHeap()->PublishObject((BYTE*)orObject);
+ GCHeapUtilities::GetGCHeap()->PublishObject((BYTE*)orObject);
}
else
{
@@ -1234,7 +1291,7 @@ extern "C" HCIMPL2_RAW(VOID, JIT_WriteBarrier, Object **dst, Object *ref)
*dst = ref;
// If the store above succeeded, "dst" should be in the heap.
- assert(GCHeap::GetGCHeap()->IsHeapPointer((void*)dst));
+ assert(GCHeapUtilities::GetGCHeap()->IsHeapPointer((void*)dst));
#ifdef WRITE_BARRIER_CHECK
updateGCShadow(dst, ref); // support debugging write barrier
@@ -1280,7 +1337,7 @@ extern "C" HCIMPL2_RAW(VOID, JIT_WriteBarrierEnsureNonHeapTarget, Object **dst,
STATIC_CONTRACT_THROWS;
STATIC_CONTRACT_GC_NOTRIGGER;
- assert(!GCHeap::GetGCHeap()->IsHeapPointer((void*)dst));
+ assert(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((void*)dst));
// no HELPER_METHOD_FRAME because we are MODE_COOPERATIVE, GC_NOTRIGGER
diff --git a/src/vm/gchost.cpp b/src/vm/gchost.cpp
index 4f7d52f805..b51f2459fd 100644
--- a/src/vm/gchost.cpp
+++ b/src/vm/gchost.cpp
@@ -22,7 +22,7 @@
#include "corhost.h"
#include "excep.h"
#include "field.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#if !defined(FEATURE_CORECLR)
inline size_t SizeInKBytes(size_t cbSize)
@@ -48,7 +48,7 @@ HRESULT CorGCHost::_SetGCSegmentSize(SIZE_T SegmentSize)
HRESULT hr = S_OK;
// Sanity check the value, it must be a power of two and big enough.
- if (!GCHeap::IsValidSegmentSize(SegmentSize))
+ if (!GCHeapUtilities::GetGCHeap()->IsValidSegmentSize(SegmentSize))
{
hr = E_INVALIDARG;
}
@@ -74,7 +74,7 @@ HRESULT CorGCHost::_SetGCMaxGen0Size(SIZE_T MaxGen0Size)
HRESULT hr = S_OK;
// Sanity check the value is at least large enough.
- if (!GCHeap::IsValidGen0MaxSize(MaxGen0Size))
+ if (!GCHeapUtilities::GetGCHeap()->IsValidGen0MaxSize(MaxGen0Size))
{
hr = E_INVALIDARG;
}
@@ -151,7 +151,7 @@ HRESULT CorGCHost::Collect(
HRESULT hr = E_FAIL;
- if (Generation > (int) GCHeap::GetGCHeap()->GetMaxGeneration())
+ if (Generation > (int) GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
hr = E_INVALIDARG;
else
{
@@ -170,7 +170,7 @@ HRESULT CorGCHost::Collect(
EX_TRY
{
- hr = GCHeap::GetGCHeap()->GarbageCollect(Generation);
+ hr = GCHeapUtilities::GetGCHeap()->GarbageCollect(Generation);
}
EX_CATCH
{
@@ -268,7 +268,7 @@ HRESULT CorGCHost::SetVirtualMemLimit(
}
CONTRACTL_END;
- GCHeap::GetGCHeap()->SetReservedVMLimit (sztMaxVirtualMemMB);
+ GCHeapUtilities::GetGCHeap()->SetReservedVMLimit (sztMaxVirtualMemMB);
return (S_OK);
}
#endif // !defined(FEATURE_CORECLR)
diff --git a/src/vm/gcinfodecoder.cpp b/src/vm/gcinfodecoder.cpp
index ef237a2768..89f470499e 100644
--- a/src/vm/gcinfodecoder.cpp
+++ b/src/vm/gcinfodecoder.cpp
@@ -4,6 +4,7 @@
#include "common.h"
+
#include "gcinfodecoder.h"
#ifdef USE_GC_INFO_DECODER
@@ -17,7 +18,7 @@
#endif
#ifndef GCINFODECODER_CONTRACT
-#define GCINFODECODER_CONTRACT(contract) contract
+#define GCINFODECODER_CONTRACT LIMITED_METHOD_CONTRACT
#endif // !GCINFODECODER_CONTRACT
@@ -68,7 +69,7 @@
}
#endif // !LOG_PIPTR
-bool GcInfoDecoder::SetIsInterruptibleCB (UINT32 startOffset, UINT32 stopOffset, LPVOID hCallback)
+bool GcInfoDecoder::SetIsInterruptibleCB (UINT32 startOffset, UINT32 stopOffset, void * hCallback)
{
GcInfoDecoder *pThis = (GcInfoDecoder*)hCallback;
@@ -282,11 +283,11 @@ GcInfoDecoder::GcInfoDecoder(
if (hasReversePInvokeFrame)
{
- m_ReversePInvokeFrameSlot = (INT32)m_Reader.DecodeVarLengthSigned(REVERSE_PINVOKE_FRAME_ENCBASE);
+ m_ReversePInvokeFrameStackSlot = (INT32)m_Reader.DecodeVarLengthSigned(REVERSE_PINVOKE_FRAME_ENCBASE);
}
else
{
- m_ReversePInvokeFrameSlot = NO_REVERSE_PINVOKE_FRAME;
+ m_ReversePInvokeFrameStackSlot = NO_REVERSE_PINVOKE_FRAME;
}
@@ -426,14 +427,14 @@ UINT32 GcInfoDecoder::FindSafePoint(UINT32 breakOffset)
return result;
}
-void GcInfoDecoder::EnumerateSafePoints(EnumerateSafePointsCallback *pCallback, LPVOID hCallback)
+void GcInfoDecoder::EnumerateSafePoints(EnumerateSafePointsCallback *pCallback, void * hCallback)
{
if(m_NumSafePoints == 0)
return;
const UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength));
- for(UINT i = 0; i < m_NumSafePoints; i++)
+ for(UINT32 i = 0; i < m_NumSafePoints; i++)
{
UINT32 normOffset = (UINT32)m_Reader.Read(numBitsPerOffset);
UINT32 offset = DENORMALIZE_CODE_OFFSET(normOffset) + 2;
@@ -450,7 +451,7 @@ void GcInfoDecoder::EnumerateSafePoints(EnumerateSafePointsCallback *pCallback,
void GcInfoDecoder::EnumerateInterruptibleRanges (
EnumerateInterruptibleRangesCallback *pCallback,
- LPVOID hCallback)
+ void * hCallback)
{
// If no info is found for the call site, we default to fully-interruptible
LOG((LF_GCROOTS, LL_INFO1000000, "No GC info found for call site at offset %x. Defaulting to fully-interruptible information.\n", (int) m_InstructionOffset));
@@ -488,10 +489,10 @@ INT32 GcInfoDecoder::GetGSCookieStackSlot()
return m_GSCookieStackSlot;
}
-INT32 GcInfoDecoder::GetReversePInvokeStackSlot()
+INT32 GcInfoDecoder::GetReversePInvokeFrameStackSlot()
{
_ASSERTE(m_Flags & DECODE_REVERSE_PINVOKE_VAR);
- return m_ReversePInvokeStackSlot;
+ return m_ReversePInvokeFrameStackSlot;
}
UINT32 GcInfoDecoder::GetGSCookieValidRangeStart()
@@ -581,7 +582,7 @@ bool GcInfoDecoder::EnumerateLiveSlots(
bool reportScratchSlots,
unsigned inputFlags,
GCEnumCallback pCallBack,
- LPVOID hCallBack
+ void * hCallBack
)
{
@@ -615,24 +616,6 @@ bool GcInfoDecoder::EnumerateLiveSlots(
UINT32 normBreakOffset = NORMALIZE_CODE_OFFSET(m_InstructionOffset);
-#if 0
- // This is currently disabled because sometimes on IA64 we need
- // to make call sites non-interruptible
- // TODO: review this
-#ifdef _DEBUG
- if(!executionAborted)
- {
- GcInfoDecoder _decoder2(
- m_GcInfoAddress,
- DECODE_INTERRUPTIBILITY,
- m_InstructionOffset
- );
-
- _ASSERTE(_decoder2.IsInterruptible());
- }
-#endif
-#endif
-
// Normalized break offset
// Relative to interruptible ranges
#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
@@ -645,48 +628,6 @@ bool GcInfoDecoder::EnumerateLiveSlots(
#ifdef PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
-#ifndef DISABLE_EH_VECTORS
- if(m_SafePointIndex < m_NumSafePoints || executionAborted)
- {
- // Skip interruptibility information
- for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
- {
- m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
- m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE );
- }
- }
- else
- {
- //
- // We didn't find the break offset in the list of call sites
- // and are not in an executionAborted frame
- // So we must have fully-interruptible information
- //
- _ASSERTE(m_NumInterruptibleRanges);
-
-#ifdef _DEBUG
- int dbgCountIntersections = 0;
-#endif
- UINT32 lastNormStop = 0;
- for(UINT32 i=0; i<m_NumInterruptibleRanges; i++)
- {
- UINT32 normStartDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA1_ENCBASE );
- UINT32 normStopDelta = (UINT32) m_Reader.DecodeVarLengthUnsigned( INTERRUPTIBLE_RANGE_DELTA2_ENCBASE ) + 1;
-
- UINT32 normStart = lastNormStop + normStartDelta;
- UINT32 normStop = normStart + normStopDelta;
- if(normBreakOffset >= normStart && normBreakOffset < normStop)
- {
- _ASSERTE(pseudoBreakOffset == 0);
- _ASSERTE(dbgCountIntersections++ == 0);
- pseudoBreakOffset = numInterruptibleLength + normBreakOffset - normStart;
- }
- numInterruptibleLength += normStopDelta;
- lastNormStop = normStop;
- }
- _ASSERTE(dbgCountIntersections == 1);
- }
-#else // DISABLE_EH_VECTORS
if(m_SafePointIndex < m_NumSafePoints && !executionAborted)
{
// Skip interruptibility information
@@ -736,7 +677,6 @@ bool GcInfoDecoder::EnumerateLiveSlots(
goto ExitSuccess;
}
}
-#endif // DISABLE_EH_VECTORS
#else // !PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
// Skip interruptibility information
@@ -778,52 +718,8 @@ bool GcInfoDecoder::EnumerateLiveSlots(
if(executionAborted)
{
-#ifndef DISABLE_EH_VECTORS
- m_Reader.Skip(m_NumSafePoints * numSlots);
-
- UINT32 numClauses = (UINT32) m_Reader.DecodeVarLengthUnsigned(NUM_EH_CLAUSES_ENCBASE);
-
- if(numClauses)
- {
- UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength));
-
- for(UINT32 i = 0; i < numClauses; i++)
- {
- UINT32 startOffset = (UINT32) DENORMALIZE_CODE_OFFSET(m_Reader.Read(numBitsPerOffset));
- UINT32 stopOffset = (UINT32) DENORMALIZE_CODE_OFFSET(m_Reader.Read(numBitsPerOffset) + 1);
-
- if(m_InstructionOffset >= startOffset
- && m_InstructionOffset < stopOffset)
- {
- for(UINT32 slotIndex = 0; slotIndex < numSlots; slotIndex++)
- {
- if(m_Reader.ReadOneFast())
- {
- ReportSlotToGC(
- slotDecoder,
- slotIndex,
- pRD,
- reportScratchSlots,
- inputFlags,
- pCallBack,
- hCallBack
- );
- }
- }
- }
- else
- {
- m_Reader.Skip(numSlots);
- }
- }
- }
- goto ReportUntracked;
-#else //DISABLE_EH_VECTORS
-
_ASSERTE(m_NumSafePoints == 0);
m_Reader.Skip(m_NumSafePoints * numSlots);
-
-#endif //DISABLE_EH_VECTORS
}
else if( m_SafePointIndex != m_NumSafePoints )
{
@@ -891,15 +787,7 @@ bool GcInfoDecoder::EnumerateLiveSlots(
else
{
m_Reader.Skip(m_NumSafePoints * numSlots);
-
-#ifndef DISABLE_EH_VECTORS
- UINT32 numClauses = (UINT32) m_Reader.DecodeVarLengthUnsigned(NUM_EH_CLAUSES_ENCBASE);
- UINT32 numBitsPerOffset = CeilOfLog2(NORMALIZE_CODE_OFFSET(m_CodeLength));
-
- m_Reader.Skip((numBitsPerOffset * 2 + numSlots) * numClauses);
-#endif //DISABLE_EH_VECTORS
- }
-
+ }
#endif // PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
_ASSERTE(m_NumInterruptibleRanges);
@@ -1069,9 +957,7 @@ ReportUntracked:
ReportUntrackedSlots(slotDecoder, pRD, inputFlags, pCallBack, hCallBack);
}
-#ifdef DISABLE_EH_VECTORS
ExitSuccess:
-#endif
return true;
}
@@ -1080,7 +966,7 @@ void GcInfoDecoder::EnumerateUntrackedSlots(
PREGDISPLAY pRD,
unsigned inputFlags,
GCEnumCallback pCallBack,
- LPVOID hCallBack
+ void * hCallBack
)
{
_ASSERTE(GC_SLOT_INTERIOR == GC_CALL_INTERIOR);
@@ -1114,7 +1000,7 @@ void GcInfoDecoder::ReportUntrackedSlots(
PREGDISPLAY pRD,
unsigned inputFlags,
GCEnumCallback pCallBack,
- LPVOID hCallBack
+ void * hCallBack
)
{
for(UINT32 slotIndex = slotDecoder.GetNumTracked(); slotIndex < slotDecoder.GetNumSlots(); slotIndex++)
@@ -1455,11 +1341,15 @@ OBJECTREF* GcInfoDecoder::GetRegisterSlot(
_ASSERTE(regNum >= 0 && regNum <= 16);
_ASSERTE(regNum != 4); // rsp
+#ifdef FEATURE_REDHAWK
+ PTR_UIntNative* ppRax = &pRD->pRax;
+ if (regNum > 4) regNum--; // rsp is skipped in Redhawk RegDisplay
+#else
// The fields of KNONVOLATILE_CONTEXT_POINTERS are in the same order as
// the processor encoding numbers.
- ULONGLONG **ppRax;
- ppRax = &pRD->pCurrentContextPointers->Rax;
+ ULONGLONG **ppRax = &pRD->pCurrentContextPointers->Rax;
+#endif
return (OBJECTREF*)*(ppRax + regNum);
}
@@ -1476,8 +1366,7 @@ OBJECTREF* GcInfoDecoder::GetCapturedRegister(
// The fields of CONTEXT are in the same order as
// the processor encoding numbers.
- ULONGLONG *pRax;
- pRax = &pRD->pCurrentContext->Rax;
+ ULONGLONG *pRax = &pRD->pCurrentContext->Rax;
return (OBJECTREF*)(pRax + regNum);
}
@@ -1509,7 +1398,7 @@ bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, P
#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
_ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
- ULONGLONG pSlot = (ULONGLONG) GetStackSlot(spOffset, spBase, pRD);
+ TADDR pSlot = (TADDR) GetStackSlot(spOffset, spBase, pRD);
_ASSERTE(pSlot >= pRD->SP);
return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
@@ -1525,12 +1414,9 @@ void GcInfoDecoder::ReportRegisterToGC( // AMD64
PREGDISPLAY pRD,
unsigned flags,
GCEnumCallback pCallBack,
- LPVOID hCallBack)
+ void * hCallBack)
{
- GCINFODECODER_CONTRACT(CONTRACTL {
- NOTHROW;
- GC_NOTRIGGER;
- } CONTRACTL_END);
+ GCINFODECODER_CONTRACT;
_ASSERTE(regNum >= 0 && regNum <= 16);
_ASSERTE(regNum != 4); // rsp
@@ -1624,8 +1510,7 @@ OBJECTREF* GcInfoDecoder::GetCapturedRegister(
// The fields of CONTEXT are in the same order as
// the processor encoding numbers.
- ULONG *pR0;
- pR0 = &pRD->pCurrentContext->R0;
+ ULONG *pR0 = &pRD->pCurrentContext->R0;
return (OBJECTREF*)(pR0 + regNum);
}
@@ -1646,7 +1531,7 @@ bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, P
#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
_ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
- DWORD pSlot = (DWORD) GetStackSlot(spOffset, spBase, pRD);
+ TADDR pSlot = (TADDR) GetStackSlot(spOffset, spBase, pRD);
_ASSERTE(pSlot >= pRD->SP);
return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
@@ -1662,12 +1547,9 @@ void GcInfoDecoder::ReportRegisterToGC( // ARM
PREGDISPLAY pRD,
unsigned flags,
GCEnumCallback pCallBack,
- LPVOID hCallBack)
+ void * hCallBack)
{
- GCINFODECODER_CONTRACT(CONTRACTL {
- NOTHROW;
- GC_NOTRIGGER;
- } CONTRACTL_END);
+ GCINFODECODER_CONTRACT;
_ASSERTE(regNum >= 0 && regNum <= 14);
_ASSERTE(regNum != 13); // sp
@@ -1740,7 +1622,7 @@ bool GcInfoDecoder::IsScratchStackSlot(INT32 spOffset, GcStackSlotBase spBase, P
#ifdef FIXED_STACK_PARAMETER_SCRATCH_AREA
_ASSERTE( m_Flags & DECODE_GC_LIFETIMES );
- ULONGLONG pSlot = (ULONGLONG) GetStackSlot(spOffset, spBase, pRD);
+ TADDR pSlot = (TADDR) GetStackSlot(spOffset, spBase, pRD);
_ASSERTE(pSlot >= pRD->SP);
return (pSlot < pRD->SP + m_SizeOfStackOutgoingAndScratchArea);
@@ -1756,12 +1638,9 @@ void GcInfoDecoder::ReportRegisterToGC( // ARM64
PREGDISPLAY pRD,
unsigned flags,
GCEnumCallback pCallBack,
- LPVOID hCallBack)
+ void * hCallBack)
{
- GCINFODECODER_CONTRACT(CONTRACTL {
- NOTHROW;
- GC_NOTRIGGER;
- } CONTRACTL_END);
+ GCINFODECODER_CONTRACT;
_ASSERTE(regNum >= 0 && regNum <= 30);
_ASSERTE(regNum != 18);
@@ -1801,8 +1680,7 @@ OBJECTREF* GcInfoDecoder::GetCapturedRegister(
// The fields of CONTEXT are in the same order as
// the processor encoding numbers.
- DWORD64 *pX0;
- pX0 = &pRD->pCurrentContext->X0;
+ DWORD64 *pX0 = &pRD->pCurrentContext->X0;
return (OBJECTREF*)(pX0 + regNum);
}
@@ -1837,7 +1715,7 @@ void GcInfoDecoder::ReportRegisterToGC(
PREGDISPLAY pRD,
unsigned flags,
GCEnumCallback pCallBack,
- LPVOID hCallBack)
+ void * hCallBack)
{
_ASSERTE( !"NYI" );
}
@@ -1859,7 +1737,7 @@ OBJECTREF* GcInfoDecoder::GetStackSlot(
if( GC_SP_REL == spBase )
{
- pObjRef = (OBJECTREF*) ((SIZE_T)GetRegdisplaySP(pRD) + spOffset);
+ pObjRef = (OBJECTREF*) ((SIZE_T)pRD->SP + spOffset);
}
else if( GC_CALLER_SP_REL == spBase )
{
@@ -1916,12 +1794,9 @@ void GcInfoDecoder::ReportStackSlotToGC(
PREGDISPLAY pRD,
unsigned flags,
GCEnumCallback pCallBack,
- LPVOID hCallBack)
+ void * hCallBack)
{
- GCINFODECODER_CONTRACT(CONTRACTL {
- NOTHROW;
- GC_NOTRIGGER;
- } CONTRACTL_END);
+ GCINFODECODER_CONTRACT;
OBJECTREF* pObjRef = GetStackSlot(spOffset, spBase, pRD);
_ASSERTE( IS_ALIGNED( pObjRef, sizeof( Object* ) ) );
diff --git a/src/vm/gc.h b/src/vm/gcinterface.h
index 825b5da803..cc70becdf1 100644
--- a/src/vm/gc.h
+++ b/src/vm/gcinterface.h
@@ -2,4 +2,4 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-#include "../gc/gc.h"
+#include "../gc/gcinterface.h" \ No newline at end of file
diff --git a/src/vm/gcstress.h b/src/vm/gcstress.h
index 609276e148..04487c611e 100644
--- a/src/vm/gcstress.h
+++ b/src/vm/gcstress.h
@@ -280,17 +280,17 @@ namespace _GCStress
// GC Trigger policy classes define how a garbage collection is triggered
// This is the default GC Trigger policy that simply calls
- // GCHeap::StressHeap
+ // IGCHeap::StressHeap
class StressGcTriggerPolicy
{
public:
FORCEINLINE
static void Trigger()
- { GCHeap::GetGCHeap()->StressHeap(); }
+ { GCHeapUtilities::GetGCHeap()->StressHeap(); }
FORCEINLINE
- static void Trigger(::alloc_context* acontext)
- { GCHeap::GetGCHeap()->StressHeap(acontext); }
+ static void Trigger(::gc_alloc_context* acontext)
+ { GCHeapUtilities::GetGCHeap()->StressHeap(acontext); }
};
// This is an overriding GC Trigger policy that triggers a GC by calling
@@ -403,7 +403,7 @@ namespace _GCStress
// Additionally it switches the GC mode as specified by GcModePolicy, and it
// uses GcTriggerPolicy::Trigger(alloc_context*) to actually trigger the GC
FORCEINLINE
- static void MaybeTrigger(::alloc_context* acontext, DWORD minFastGc = 0)
+ static void MaybeTrigger(::gc_alloc_context* acontext, DWORD minFastGc = 0)
{
if (IsEnabled(minFastGc) && GCStressPolicy::IsEnabled())
{
@@ -455,7 +455,7 @@ namespace _GCStress
public:
FORCEINLINE
- static void MaybeTrigger(::alloc_context* acontext)
+ static void MaybeTrigger(::gc_alloc_context* acontext)
{
GcStressBase::MaybeTrigger(acontext);
diff --git a/src/vm/gdbjit.cpp b/src/vm/gdbjit.cpp
index 9f9c116820..8e728839d6 100644
--- a/src/vm/gdbjit.cpp
+++ b/src/vm/gdbjit.cpp
@@ -11,16 +11,323 @@
//*****************************************************************************
#include "common.h"
+#include "formattype.h"
#include "gdbjit.h"
#include "gdbjithelpers.h"
-struct DebuggerILToNativeMap
+TypeInfoBase*
+GetTypeInfoFromTypeHandle(TypeHandle typeHandle, NotifyGdb::PTK_TypeInfoMap pTypeMap)
{
- ULONG ilOffset;
- ULONG nativeStartOffset;
- ULONG nativeEndOffset;
- ICorDebugInfo::SourceTypes source;
-};
+ TypeInfoBase *typeInfo = nullptr;
+ TypeKey key = typeHandle.GetTypeKey();
+ PTR_MethodTable pMT = typeHandle.GetMethodTable();
+
+ if (pTypeMap->Lookup(&key, &typeInfo))
+ {
+ return typeInfo;
+ }
+
+ CorElementType corType = typeHandle.GetSignatureCorElementType();
+ switch (corType)
+ {
+ case ELEMENT_TYPE_VOID:
+ case ELEMENT_TYPE_BOOLEAN:
+ case ELEMENT_TYPE_CHAR:
+ case ELEMENT_TYPE_I1:
+ case ELEMENT_TYPE_U1:
+ case ELEMENT_TYPE_I2:
+ case ELEMENT_TYPE_U2:
+ case ELEMENT_TYPE_I4:
+ case ELEMENT_TYPE_U4:
+ case ELEMENT_TYPE_I8:
+ case ELEMENT_TYPE_U8:
+ case ELEMENT_TYPE_R4:
+ case ELEMENT_TYPE_R8:
+ case ELEMENT_TYPE_U:
+ case ELEMENT_TYPE_I:
+ typeInfo = new (nothrow) PrimitiveTypeInfo(typeHandle, CorElementTypeToDWEncoding[corType]);
+ if (typeInfo == nullptr)
+ return nullptr;
+
+ typeInfo->m_type_size = CorTypeInfo::Size(corType);
+
+ break;
+ case ELEMENT_TYPE_VALUETYPE:
+ case ELEMENT_TYPE_CLASS:
+ {
+ ApproxFieldDescIterator fieldDescIterator(pMT,
+ pMT->IsString() ? ApproxFieldDescIterator::INSTANCE_FIELDS : ApproxFieldDescIterator::ALL_FIELDS);
+ ULONG cFields = fieldDescIterator.Count();
+
+ typeInfo = new (nothrow) ClassTypeInfo(typeHandle, cFields);
+
+ if (typeInfo == nullptr)
+ return nullptr;
+
+ typeInfo->m_type_size = typeHandle.AsMethodTable()->GetClass()->GetSize();
+
+ RefTypeInfo* refTypeInfo = nullptr;
+ if (!typeHandle.IsValueType())
+ {
+ // name the type
+ refTypeInfo = new (nothrow) RefTypeInfo(typeHandle, typeInfo);
+ if (refTypeInfo == nullptr)
+ {
+ return nullptr;
+ }
+ refTypeInfo->m_type_size = sizeof(TADDR);
+ refTypeInfo->m_value_type = typeInfo;
+ refTypeInfo->CalculateName();
+
+ pTypeMap->Add(refTypeInfo->GetTypeKey(), refTypeInfo);
+ }
+
+ pTypeMap->Add(typeInfo->GetTypeKey(), typeInfo);
+ typeInfo->CalculateName();
+
+ //
+ // Now fill in the array
+ //
+ FieldDesc *pField;
+
+ for (ULONG i = 0; i < cFields; i++)
+ {
+ pField = fieldDescIterator.Next();
+ ClassTypeInfo *info = static_cast<ClassTypeInfo*>(typeInfo);
+
+ LPCUTF8 szName = pField->GetName();
+ info->members[i].m_member_name = new char[strlen(szName) + 1];
+ strcpy(info->members[i].m_member_name, szName);
+ if (!pField->IsStatic())
+ {
+ info->members[i].m_member_offset = (ULONG)pField->GetOffset();
+ if (!typeHandle.IsValueType())
+ info->members[i].m_member_offset += Object::GetOffsetOfFirstField();
+ }
+ else
+ {
+ PTR_BYTE base = 0;
+ MethodTable* pMT = pField->GetEnclosingMethodTable();
+ base = pField->GetBase();
+
+                // TODO: add support for generics with static fields
+ if (pField->IsRVA() || !pMT->IsDynamicStatics())
+ {
+ PTR_VOID pAddress = pField->GetStaticAddressHandle((PTR_VOID)dac_cast<TADDR>(base));
+ info->members[i].m_static_member_address = dac_cast<TADDR>(pAddress);
+ }
+ }
+
+ info->members[i].m_member_type =
+ GetTypeInfoFromTypeHandle(pField->GetExactFieldType(typeHandle), pTypeMap);
+
+ // handle the System.String case:
+ // coerce type of the second field into array type
+ if (pMT->IsString() && i == 1)
+ {
+ TypeInfoBase* elemTypeInfo = info->members[1].m_member_type;
+ TypeInfoBase* arrayTypeInfo = new (nothrow) ArrayTypeInfo(typeHandle.MakeSZArray(), 0, elemTypeInfo);
+ if (arrayTypeInfo == nullptr)
+ return nullptr;
+ info->members[1].m_member_type = arrayTypeInfo;
+ }
+ }
+ if (refTypeInfo)
+ return refTypeInfo;
+ else
+ return typeInfo;
+ }
+ case ELEMENT_TYPE_PTR:
+ case ELEMENT_TYPE_BYREF:
+ {
+ TypeInfoBase* valTypeInfo = GetTypeInfoFromTypeHandle(typeHandle.GetTypeParam(), pTypeMap);
+ typeInfo = new (nothrow) RefTypeInfo(typeHandle, valTypeInfo);
+ if (typeInfo == nullptr)
+ return nullptr;
+ typeInfo->m_type_size = sizeof(TADDR);
+ typeInfo->m_type_offset = valTypeInfo->m_type_offset;
+ break;
+ }
+ case ELEMENT_TYPE_ARRAY:
+ case ELEMENT_TYPE_SZARRAY:
+ {
+ typeInfo = new (nothrow) ClassTypeInfo(typeHandle, 2);
+ if (typeInfo == nullptr)
+ return nullptr;
+ typeInfo->m_type_size = pMT->GetClass()->GetSize();
+
+ typeInfo->CalculateName();
+ RefTypeInfo *refTypeInfo = new (nothrow) RefTypeInfo(typeHandle, typeInfo);
+ if (refTypeInfo == nullptr)
+ {
+ return nullptr;
+ }
+ refTypeInfo->m_type_size = sizeof(TADDR);
+ refTypeInfo->m_value_type = typeInfo;
+ refTypeInfo->CalculateName();
+
+ pTypeMap->Add(refTypeInfo->GetTypeKey(), refTypeInfo);
+
+ TypeInfoBase* lengthTypeInfo = GetTypeInfoFromTypeHandle(
+ TypeHandle(MscorlibBinder::GetElementType(ELEMENT_TYPE_I4)), pTypeMap);
+
+ TypeInfoBase* valTypeInfo = GetTypeInfoFromTypeHandle(typeHandle.GetTypeParam(), pTypeMap);
+ TypeInfoBase* arrayTypeInfo = new (nothrow) ArrayTypeInfo(typeHandle, 0, valTypeInfo);
+ if (arrayTypeInfo == nullptr)
+ return nullptr;
+
+ ClassTypeInfo *info = static_cast<ClassTypeInfo*>(typeInfo);
+
+ info->members[0].m_member_name = new (nothrow) char[16];
+ strcpy(info->members[0].m_member_name, "m_NumComponents");
+ info->members[0].m_member_offset = ArrayBase::GetOffsetOfNumComponents();
+ info->members[0].m_member_type = lengthTypeInfo;
+ info->members[0].m_member_type->m_type_size = sizeof(DWORD);
+
+ info->members[1].m_member_name = new (nothrow) char[7];
+ strcpy(info->members[1].m_member_name, "m_Data");
+ info->members[1].m_member_offset = ArrayBase::GetDataPtrOffset(pMT);
+ info->members[1].m_member_type = arrayTypeInfo;
+ info->members[1].m_member_type->m_type_size = sizeof(TADDR);
+
+ return refTypeInfo;
+ }
+ default:
+ ASSERT(0 && "not implemented");
+ break;
+ }
+ // name the type
+ if (corType == ELEMENT_TYPE_CHAR)
+ {
+ typeInfo->m_type_name = new char[9];
+ strcpy(typeInfo->m_type_name, "char16_t");
+ }
+ else
+ {
+ typeInfo->CalculateName();
+ }
+ pTypeMap->Add(typeInfo->GetTypeKey(), typeInfo);
+ return typeInfo;
+}
+
+TypeInfoBase* GetArgTypeInfo(MethodDesc* MethodDescPtr,
+ NotifyGdb::PTK_TypeInfoMap pTypeMap,
+ unsigned ilIndex)
+{
+ MetaSig sig(MethodDescPtr);
+ TypeHandle th;
+ if (ilIndex == 0)
+ {
+ th = sig.GetRetTypeHandleNT();
+ }
+ else
+ {
+ while (--ilIndex)
+ sig.SkipArg();
+
+ sig.NextArg();
+ th = sig.GetLastTypeHandleNT();
+ }
+ return GetTypeInfoFromTypeHandle(th, pTypeMap);
+}
+
+TypeInfoBase* GetLocalTypeInfo(MethodDesc *MethodDescPtr,
+ NotifyGdb::PTK_TypeInfoMap pTypeMap,
+ unsigned ilIndex)
+{
+ COR_ILMETHOD_DECODER method(MethodDescPtr->GetILHeader());
+ if (method.GetLocalVarSigTok())
+ {
+ DWORD cbSigLen;
+ PCCOR_SIGNATURE pComSig;
+
+ if (FAILED(MethodDescPtr->GetMDImport()->GetSigFromToken(method.GetLocalVarSigTok(), &cbSigLen, &pComSig)))
+ {
+ printf("\nInvalid record");
+ return nullptr;
+ }
+
+ _ASSERTE(*pComSig == IMAGE_CEE_CS_CALLCONV_LOCAL_SIG);
+
+ SigTypeContext typeContext(MethodDescPtr, TypeHandle());
+ MetaSig sig(pComSig, cbSigLen, MethodDescPtr->GetModule(), &typeContext, MetaSig::sigLocalVars);
+ if (ilIndex > 0)
+ {
+ while (ilIndex--)
+ sig.SkipArg();
+ }
+ sig.NextArg();
+ TypeHandle th = sig.GetLastTypeHandleNT();
+ return GetTypeInfoFromTypeHandle(th, pTypeMap);
+ }
+ return nullptr;
+}
+
+HRESULT GetArgNameByILIndex(MethodDesc* MethodDescPtr, unsigned index, LPSTR &paramName)
+{
+ IMDInternalImport* mdImport = MethodDescPtr->GetMDImport();
+ mdParamDef paramToken;
+ USHORT seq;
+ DWORD attr;
+ HRESULT status;
+
+ // Param indexing is 1-based.
+ ULONG32 mdIndex = index + 1;
+
+ MetaSig sig(MethodDescPtr);
+ if (sig.HasThis())
+ {
+ mdIndex--;
+ }
+ status = mdImport->FindParamOfMethod(MethodDescPtr->GetMemberDef(), mdIndex, &paramToken);
+ if (status == S_OK)
+ {
+ LPCSTR name;
+ status = mdImport->GetParamDefProps(paramToken, &seq, &attr, &name);
+ paramName = new char[strlen(name) + 1];
+ strcpy(paramName, name);
+ }
+ return status;
+}
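
A worked case for the index arithmetic above, for a hypothetical instance method int Foo(int x): IL argument 0 is 'this', which has no ParamDef row, and IL argument 1 is x, so mdIndex = 1 + 1 - 1 = 1 and FindParamOfMethod is queried with sequence number 1, yielding the row named "x".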
+
+// Copy-pasted from src/debug/di/module.cpp
+HRESULT FindNativeInfoInILVariable(DWORD dwIndex,
+ SIZE_T ip,
+ ICorDebugInfo::NativeVarInfo** nativeInfoList,
+ unsigned int nativeInfoCount,
+ ICorDebugInfo::NativeVarInfo** ppNativeInfo)
+{
+ _ASSERTE(ppNativeInfo != NULL);
+ *ppNativeInfo = NULL;
+ int lastGoodOne = -1;
+ for (unsigned int i = 0; i < (unsigned)nativeInfoCount; i++)
+ {
+ if ((*nativeInfoList)[i].varNumber == dwIndex)
+ {
+ if ((lastGoodOne == -1) || ((*nativeInfoList)[lastGoodOne].startOffset < (*nativeInfoList)[i].startOffset))
+ {
+ lastGoodOne = i;
+ }
+
+ if (((*nativeInfoList)[i].startOffset <= ip) &&
+ ((*nativeInfoList)[i].endOffset > ip))
+ {
+ *ppNativeInfo = &((*nativeInfoList)[i]);
+
+ return S_OK;
+ }
+ }
+ }
+
+ if ((lastGoodOne > -1) && ((*nativeInfoList)[lastGoodOne].endOffset == ip))
+ {
+ *ppNativeInfo = &((*nativeInfoList)[lastGoodOne]);
+ return S_OK;
+ }
+
+ return CORDBG_E_IL_VAR_NOT_AVAILABLE;
+}
+
BYTE* DebugInfoStoreNew(void * pData, size_t cBytes)
{
return new (nothrow) BYTE[cBytes];
@@ -30,7 +337,9 @@ BYTE* DebugInfoStoreNew(void * pData, size_t cBytes)
HRESULT
GetMethodNativeMap(MethodDesc* methodDesc,
ULONG32* numMap,
- DebuggerILToNativeMap** map)
+ DebuggerILToNativeMap** map,
+ ULONG32* pcVars,
+ ICorDebugInfo::NativeVarInfo** ppVars)
{
// Use the DebugInfoStore to get IL->Native maps.
// It doesn't matter whether we're jitted, ngenned etc.
@@ -48,8 +357,8 @@ GetMethodNativeMap(MethodDesc* methodDesc,
NULL, // allocator
&countMapCopy,
&mapCopy,
- NULL,
- NULL);
+ pcVars,
+ ppVars);
if (!success)
{
@@ -83,9 +392,62 @@ GetMethodNativeMap(MethodDesc* methodDesc,
return S_OK;
}
+HRESULT FunctionMember::GetLocalsDebugInfo(NotifyGdb::PTK_TypeInfoMap pTypeMap,
+ LocalsInfo& locals,
+ int startNativeOffset)
+{
+
+ ICorDebugInfo::NativeVarInfo* nativeVar = NULL;
+ int thisOffs = 0;
+ if (!md->IsStatic())
+ {
+ thisOffs = 1;
+ }
+
+ int i;
+ for (i = 0; i < m_num_args - thisOffs; i++)
+ {
+ if (FindNativeInfoInILVariable(i + thisOffs, startNativeOffset, &locals.pVars, locals.countVars, &nativeVar) == S_OK)
+ {
+ vars[i + thisOffs].m_var_type = GetArgTypeInfo(md, pTypeMap, i + 1);
+ GetArgNameByILIndex(md, i + thisOffs, vars[i + thisOffs].m_var_name);
+ vars[i + thisOffs].m_il_index = i;
+ vars[i + thisOffs].m_native_offset = nativeVar->loc.vlStk.vlsOffset;
+ vars[i + thisOffs].m_var_abbrev = 6;
+ }
+ }
+    // Add info about 'this' as the first argument
+ if (thisOffs == 1)
+ {
+ if (FindNativeInfoInILVariable(0, startNativeOffset, &locals.pVars, locals.countVars, &nativeVar) == S_OK)
+ {
+ vars[0].m_var_type = GetTypeInfoFromTypeHandle(TypeHandle(md->GetMethodTable()), pTypeMap);
+ vars[0].m_var_name = new char[strlen("this") + 1];
+ strcpy(vars[0].m_var_name, "this");
+ vars[0].m_il_index = 0;
+ vars[0].m_native_offset = nativeVar->loc.vlStk.vlsOffset;
+ vars[0].m_var_abbrev = 13;
+ }
+ i++;
+ }
+ for (; i < m_num_vars; i++)
+ {
+ if (FindNativeInfoInILVariable(
+ i, startNativeOffset, &locals.pVars, locals.countVars, &nativeVar) == S_OK)
+ {
+ vars[i].m_var_type = GetLocalTypeInfo(md, pTypeMap, i - m_num_args);
+ vars[i].m_var_name = new char[strlen(locals.localsName[i - m_num_args]) + 1];
+ strcpy(vars[i].m_var_name, locals.localsName[i - m_num_args]);
+ vars[i].m_il_index = i - m_num_args;
+ vars[i].m_native_offset = nativeVar->loc.vlStk.vlsOffset;
+ vars[i].m_var_abbrev = 5;
+ }
+ }
+ return S_OK;
+}
/* Get mapping of IL offsets to source line numbers */
HRESULT
-GetDebugInfoFromPDB(MethodDesc* MethodDescPtr, SymbolsInfo** symInfo, unsigned int &symInfoLen)
+GetDebugInfoFromPDB(MethodDesc* MethodDescPtr, SymbolsInfo** symInfo, unsigned int &symInfoLen, LocalsInfo &locals)
{
DebuggerILToNativeMap* map = NULL;
@@ -93,8 +455,8 @@ GetDebugInfoFromPDB(MethodDesc* MethodDescPtr, SymbolsInfo** symInfo, unsigned i
if (!getInfoForMethodDelegate)
return E_FAIL;
-
- if (GetMethodNativeMap(MethodDescPtr, &numMap, &map) != S_OK)
+
+ if (GetMethodNativeMap(MethodDescPtr, &numMap, &map, &locals.countVars, &locals.pVars) != S_OK)
return E_FAIL;
const Module* mod = MethodDescPtr->GetMethodTable()->GetModule();
@@ -105,48 +467,125 @@ GetDebugInfoFromPDB(MethodDesc* MethodDescPtr, SymbolsInfo** symInfo, unsigned i
StackScratchBuffer scratch;
const char* szModName = modName.GetUTF8(scratch);
- MethodDebugInfo* methodDebugInfo = new (nothrow) MethodDebugInfo();
- if (methodDebugInfo == nullptr)
- return E_OUTOFMEMORY;
+ MethodDebugInfo methodDebugInfo;
- methodDebugInfo->points = (SequencePointInfo*) CoTaskMemAlloc(sizeof(SequencePointInfo) * numMap);
- if (methodDebugInfo->points == nullptr)
+ methodDebugInfo.points = (SequencePointInfo*) CoTaskMemAlloc(sizeof(SequencePointInfo) * numMap);
+ if (methodDebugInfo.points == nullptr)
return E_OUTOFMEMORY;
- methodDebugInfo->size = numMap;
+ methodDebugInfo.size = numMap;
- if (getInfoForMethodDelegate(szModName, MethodDescPtr->GetMemberDef(), *methodDebugInfo) == FALSE)
+ if (getInfoForMethodDelegate(szModName, MethodDescPtr->GetMemberDef(), methodDebugInfo) == FALSE)
return E_FAIL;
- symInfoLen = methodDebugInfo->size;
- *symInfo = new (nothrow) SymbolsInfo[symInfoLen];
+ symInfoLen = numMap;
+ *symInfo = new (nothrow) SymbolsInfo[numMap];
if (*symInfo == nullptr)
return E_FAIL;
+ locals.size = methodDebugInfo.localsSize;
+ locals.localsName = new (nothrow) char *[locals.size];
+ if (locals.localsName == nullptr)
+ return E_FAIL;
+
+ for (ULONG32 i = 0; i < locals.size; i++)
+ {
+ size_t sizeRequired = WideCharToMultiByte(CP_UTF8, 0, methodDebugInfo.locals[i], -1, NULL, 0, NULL, NULL);
+ locals.localsName[i] = new (nothrow) char[sizeRequired];
- for (ULONG32 i = 0; i < symInfoLen; i++)
+ int len = WideCharToMultiByte(
+ CP_UTF8, 0, methodDebugInfo.locals[i], -1, locals.localsName[i], sizeRequired, NULL, NULL);
+ }
+
+ for (ULONG32 j = 0; j < numMap; j++)
{
- for (ULONG32 j = 0; j < numMap; j++)
+ SymbolsInfo& s = (*symInfo)[j];
+
+ if (j == 0) {
+ s.fileName[0] = 0;
+ s.lineNumber = 0;
+ s.fileIndex = 0;
+ } else {
+ s = (*symInfo)[j - 1];
+ }
+ s.nativeOffset = map[j].nativeStartOffset;
+ s.ilOffset = map[j].ilOffset;
+ s.source = map[j].source;
+ s.lineNumber = 0;
+
+ for (ULONG32 i = 0; i < methodDebugInfo.size; i++)
{
- if (methodDebugInfo->points[i].ilOffset == map[j].ilOffset)
- {
- SymbolsInfo& s = (*symInfo)[i];
- const SequencePointInfo& sp = methodDebugInfo->points[i];
+ const SequencePointInfo& sp = methodDebugInfo.points[i];
- s.nativeOffset = map[j].nativeStartOffset;
- s.ilOffset = map[j].ilOffset;
+ if (methodDebugInfo.points[i].ilOffset == map[j].ilOffset)
+ {
s.fileIndex = 0;
- //wcscpy(s.fileName, sp.fileName);
int len = WideCharToMultiByte(CP_UTF8, 0, sp.fileName, -1, s.fileName, sizeof(s.fileName), NULL, NULL);
s.fileName[len] = 0;
s.lineNumber = sp.lineNumber;
+ break;
}
}
}
- CoTaskMemFree(methodDebugInfo->points);
+ CoTaskMemFree(methodDebugInfo.points);
return S_OK;
}
+/* LEB128 for 32-bit unsigned integer */
+int Leb128Encode(uint32_t num, char* buf, int size)
+{
+ int i = 0;
+
+ do
+ {
+ uint8_t byte = num & 0x7F;
+ if (i >= size)
+ break;
+ num >>= 7;
+ if (num != 0)
+ byte |= 0x80;
+ buf[i++] = byte;
+ }
+ while (num != 0);
+
+ return i;
+}
+
+/* LEB128 for 32-bit signed integer */
+int Leb128Encode(int32_t num, char* buf, int size)
+{
+ int i = 0;
+    bool hasMore = true;
+
+ while (hasMore && i < size)
+ {
+ uint8_t byte = num & 0x7F;
+ num >>= 7;
+
+ if ((num == 0 && (byte & 0x40) == 0) || (num == -1 && (byte & 0x40) == 0x40))
+ hasMore = false;
+ else
+ byte |= 0x80;
+ buf[i++] = byte;
+ }
+
+ return i;
+}
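+
+/* Worked example (illustration only): Leb128Encode((int32_t)-8, ...)
+   emits the single byte 0x78 -- its sign bit (0x40) is already set, so no
+   continuation byte is needed -- while -129 emits 0xFF 0x7E. */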
+
+int GetFrameLocation(int nativeOffset, char* bufVarLoc)
+{
+ char cnvBuf[16] = {0};
+ int len = Leb128Encode(static_cast<int32_t>(nativeOffset), cnvBuf, sizeof(cnvBuf));
+ bufVarLoc[0] = len + 1;
+ bufVarLoc[1] = DW_OP_fbreg;
+ for (int j = 0; j < len; j++)
+ {
+ bufVarLoc[j + 2] = cnvBuf[j];
+ }
+
+    return len + 2; // The expression occupies len + 2 bytes: the length byte, the DW_OP_fbreg opcode, and the SLEB128-encoded offset.
+}
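+
+/* For example (illustrative offset): GetFrameLocation(-8, bufVarLoc)
+   fills bufVarLoc with { 0x02, DW_OP_fbreg, 0x78 } and returns 3,
+   i.e. a DWARF exprloc meaning "frame base - 8". */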
+
// GDB JIT interface
typedef enum
{
@@ -185,11 +624,24 @@ struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
/* Predefined section names */
const char* SectionNames[] = {
- "", ".text", ".shstrtab", ".debug_str", ".debug_abbrev", ".debug_info",
- ".debug_pubnames", ".debug_pubtypes", ".debug_line", ".symtab", ".strtab", ""
+ "",
+ ".text",
+ ".shstrtab",
+ ".debug_str",
+ ".debug_abbrev",
+ ".debug_info",
+ ".debug_pubnames",
+ ".debug_pubtypes",
+ ".debug_line",
+ ".symtab",
+ ".strtab"
+    /* After the last (.strtab) section, zero or more .thunk_* sections are generated.
+
+       Each .thunk_* section contains a single .thunk_# symbol.
+       These symbols map to the methods (or trampolines) called by the method being compiled. */
};
-const int SectionNamesCount = sizeof(SectionNames) / sizeof(SectionNames[0]);
+const int SectionNamesCount = sizeof(SectionNames) / sizeof(SectionNames[0]); // Does not include .thunk_* sections
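+// For instance (illustration only), a method that calls two thunks ends up
+// with the eleven sections above plus ".thunk_11" and ".thunk_12"; each
+// thunk section name reuses the section's own index in the final table.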
/* Static data for section headers */
struct SectionHeader {
@@ -207,11 +659,12 @@ struct SectionHeader {
{SHT_PROGBITS, 0},
{SHT_SYMTAB, 0},
{SHT_STRTAB, 0},
+ {SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR}
};
/* Static data for .debug_str section */
const char* DebugStrings[] = {
- "CoreCLR", "" /* module name */, "" /* module path */, "" /* method name */, "int"
+ "CoreCLR", "" /* module name */, "" /* module path */
};
const int DebugStringCount = sizeof(DebugStrings) / sizeof(DebugStrings[0]);
@@ -221,11 +674,95 @@ const unsigned char AbbrevTable[] = {
1, DW_TAG_compile_unit, DW_CHILDREN_yes,
DW_AT_producer, DW_FORM_strp, DW_AT_language, DW_FORM_data2, DW_AT_name, DW_FORM_strp,
DW_AT_stmt_list, DW_FORM_sec_offset, 0, 0,
- 2, DW_TAG_subprogram, DW_CHILDREN_no,
+
+ 2, DW_TAG_base_type, DW_CHILDREN_no,
+ DW_AT_name, DW_FORM_strp, DW_AT_encoding, DW_FORM_data1, DW_AT_byte_size, DW_FORM_data1, 0, 0,
+
+ 3, DW_TAG_typedef, DW_CHILDREN_no,
DW_AT_name, DW_FORM_strp, DW_AT_decl_file, DW_FORM_data1, DW_AT_decl_line, DW_FORM_data1,
- DW_AT_type, DW_FORM_ref4, DW_AT_external, DW_FORM_flag_present, 0, 0,
- 3, DW_TAG_base_type, DW_CHILDREN_no,
- DW_AT_name, DW_FORM_strp, DW_AT_encoding, DW_FORM_data1, DW_AT_byte_size, DW_FORM_data1,0, 0,
+ DW_AT_type, DW_FORM_ref4, 0, 0,
+
+ 4, DW_TAG_subprogram, DW_CHILDREN_yes,
+ DW_AT_name, DW_FORM_strp, DW_AT_linkage_name, DW_FORM_strp, DW_AT_decl_file, DW_FORM_data1, DW_AT_decl_line, DW_FORM_data1,
+ DW_AT_type, DW_FORM_ref4, DW_AT_external, DW_FORM_flag_present,
+ DW_AT_low_pc, DW_FORM_addr, DW_AT_high_pc,
+#if defined(_TARGET_AMD64_)
+ DW_FORM_data8,
+#elif defined(_TARGET_ARM_)
+ DW_FORM_data4,
+#else
+#error Unsupported platform!
+#endif
+ DW_AT_frame_base, DW_FORM_exprloc, 0, 0,
+
+ 5, DW_TAG_variable, DW_CHILDREN_no,
+ DW_AT_name, DW_FORM_strp, DW_AT_decl_file, DW_FORM_data1, DW_AT_decl_line, DW_FORM_data1, DW_AT_type,
+ DW_FORM_ref4, DW_AT_location, DW_FORM_exprloc, 0, 0,
+
+ 6, DW_TAG_formal_parameter, DW_CHILDREN_no,
+ DW_AT_name, DW_FORM_strp, DW_AT_decl_file, DW_FORM_data1, DW_AT_decl_line, DW_FORM_data1, DW_AT_type,
+ DW_FORM_ref4, DW_AT_location, DW_FORM_exprloc, 0, 0,
+
+ 7, DW_TAG_class_type, DW_CHILDREN_yes,
+ DW_AT_name, DW_FORM_strp, DW_AT_byte_size, DW_FORM_data1, 0, 0,
+
+ 8, DW_TAG_member, DW_CHILDREN_no,
+ DW_AT_name, DW_FORM_strp, DW_AT_type, DW_FORM_ref4, DW_AT_data_member_location, DW_FORM_data4, 0, 0,
+
+ 9, DW_TAG_pointer_type, DW_CHILDREN_no,
+ DW_AT_type, DW_FORM_ref4, DW_AT_byte_size, DW_FORM_data1, 0, 0,
+
+ 10, DW_TAG_array_type, DW_CHILDREN_yes,
+ DW_AT_type, DW_FORM_ref4, 0, 0,
+
+ 11, DW_TAG_subrange_type, DW_CHILDREN_no,
+ DW_AT_upper_bound, DW_FORM_exprloc, 0, 0,
+
+ 12, DW_TAG_subprogram, DW_CHILDREN_yes,
+ DW_AT_name, DW_FORM_strp, DW_AT_linkage_name, DW_FORM_strp, DW_AT_decl_file, DW_FORM_data1, DW_AT_decl_line, DW_FORM_data1,
+ DW_AT_type, DW_FORM_ref4, DW_AT_external, DW_FORM_flag_present,
+ DW_AT_low_pc, DW_FORM_addr, DW_AT_high_pc,
+#if defined(_TARGET_AMD64_)
+ DW_FORM_data8,
+#elif defined(_TARGET_ARM_)
+ DW_FORM_data4,
+#else
+#error Unsupported platform!
+#endif
+ DW_AT_frame_base, DW_FORM_exprloc, DW_AT_object_pointer, DW_FORM_ref4, 0, 0,
+
+ 13, DW_TAG_formal_parameter, DW_CHILDREN_no,
+ DW_AT_name, DW_FORM_strp, DW_AT_decl_file, DW_FORM_data1, DW_AT_decl_line, DW_FORM_data1, DW_AT_type,
+ DW_FORM_ref4, DW_AT_location, DW_FORM_exprloc, DW_AT_artificial, DW_FORM_flag_present, 0, 0,
+
+ 14, DW_TAG_member, DW_CHILDREN_no,
+ DW_AT_name, DW_FORM_strp, DW_AT_type, DW_FORM_ref4, DW_AT_external, DW_FORM_flag_present, 0, 0,
+
+ 15, DW_TAG_variable, DW_CHILDREN_no, DW_AT_specification, DW_FORM_ref4, DW_AT_location, DW_FORM_exprloc,
+ 0, 0,
+
+ 16, DW_TAG_try_block, DW_CHILDREN_no,
+ DW_AT_low_pc, DW_FORM_addr, DW_AT_high_pc,
+#if defined(_TARGET_AMD64_)
+ DW_FORM_data8,
+#elif defined(_TARGET_ARM_)
+ DW_FORM_data4,
+#else
+#error Unsupported platform!
+#endif
+ 0, 0,
+
+ 17, DW_TAG_catch_block, DW_CHILDREN_no,
+ DW_AT_low_pc, DW_FORM_addr, DW_AT_high_pc,
+#if defined(_TARGET_AMD64_)
+ DW_FORM_data8,
+#elif defined(_TARGET_ARM_)
+ DW_FORM_data4,
+#else
+#error Unsupported platform!
+#endif
+ 0, 0,
+
0
};
@@ -241,53 +778,781 @@ DwarfLineNumHeader LineNumHeader = {
};
/* Static data for .debug_info */
-struct __attribute__((packed)) DebugInfo
+struct __attribute__((packed)) DebugInfoCU
{
uint8_t m_cu_abbrev;
uint32_t m_prod_off;
uint16_t m_lang;
uint32_t m_cu_name;
uint32_t m_line_num;
-
+} debugInfoCU = {
+ 1, 0, DW_LANG_C89, 0, 0
+};
+
+struct __attribute__((packed)) DebugInfoTryCatchSub
+{
+ uint8_t m_sub_abbrev;
+#if defined(_TARGET_AMD64_)
+ uint64_t m_sub_low_pc, m_sub_high_pc;
+#elif defined(_TARGET_ARM_)
+ uint32_t m_sub_low_pc, m_sub_high_pc;
+#else
+#error Unsupported platform!
+#endif
+};
+
+struct __attribute__((packed)) DebugInfoSub
+{
uint8_t m_sub_abbrev;
uint32_t m_sub_name;
+ uint32_t m_linkage_name;
uint8_t m_file, m_line;
uint32_t m_sub_type;
-
+#if defined(_TARGET_AMD64_)
+ uint64_t m_sub_low_pc, m_sub_high_pc;
+#elif defined(_TARGET_ARM_)
+ uint32_t m_sub_low_pc, m_sub_high_pc;
+#else
+#error Unsupported platform!
+#endif
+ uint8_t m_sub_loc[2];
+};
+
+struct __attribute__((packed)) DebugInfoSubMember
+{
+ DebugInfoSub sub;
+ uint32_t m_obj_ptr;
+};
+
+// Holder for array of pointers to FunctionMember objects
+class FunctionMemberPtrArrayHolder : public NewArrayHolder<FunctionMember*>
+{
+private:
+ int m_cElements;
+
+ void DeleteElements()
+ {
+ for (int i = 0; i < m_cElements; i++)
+ {
+ delete this->m_value[i];
+ }
+ }
+
+public:
+ FunctionMemberPtrArrayHolder() :
+ NewArrayHolder<FunctionMember*>(),
+ m_cElements(0)
+ {
+ }
+
+ bool Alloc(int cElements)
+ {
+ FunctionMember** value = new (nothrow) FunctionMember*[cElements];
+ if (value == nullptr)
+ return false;
+
+ for (int i = 0; i < cElements; i++)
+ {
+ value[i] = nullptr;
+ }
+
+ // Clean previous elements
+ DeleteElements();
+
+ NewArrayHolder<FunctionMember*>::operator=(value);
+ m_cElements = cElements;
+ return true;
+ }
+
+ int GetCount() const
+ {
+ return m_cElements;
+ }
+
+ ~FunctionMemberPtrArrayHolder()
+ {
+ DeleteElements();
+ }
+};
+
+static FunctionMemberPtrArrayHolder method;
+
+struct __attribute__((packed)) DebugInfoType
+{
uint8_t m_type_abbrev;
uint32_t m_type_name;
uint8_t m_encoding;
uint8_t m_byte_size;
-} debugInfo = {
- 1, 0, DW_LANG_C89, 0, 0,
- 2, 0, 1, 1, 37,
- 3, 0, DW_ATE_signed, 4
};
+struct __attribute__((packed)) DebugInfoVar
+{
+ uint8_t m_var_abbrev;
+ uint32_t m_var_name;
+ uint8_t m_var_file, m_var_line;
+ uint32_t m_var_type;
+};
+
+struct __attribute__((packed)) DebugInfoClassType
+{
+ uint8_t m_type_abbrev;
+ uint32_t m_type_name;
+ uint8_t m_byte_size;
+};
+
+struct __attribute__((packed)) DebugInfoClassMember
+{
+ uint8_t m_member_abbrev;
+ uint32_t m_member_name;
+ uint32_t m_member_type;
+};
+
+struct __attribute__((packed)) DebugInfoStaticMember
+{
+ uint8_t m_member_abbrev;
+ uint32_t m_member_specification;
+};
+
+
+struct __attribute__((packed)) DebugInfoRefType
+{
+ uint8_t m_type_abbrev;
+ uint32_t m_ref_type;
+ uint8_t m_byte_size;
+};
+
+struct __attribute__((packed)) DebugInfoArrayType
+{
+ uint8_t m_abbrev;
+ uint32_t m_type;
+};
+
+void TypeInfoBase::DumpStrings(char* ptr, int& offset)
+{
+ if (ptr != nullptr)
+ {
+ strcpy(ptr + offset, m_type_name);
+ m_type_name_offset = offset;
+ }
+ offset += strlen(m_type_name) + 1;
+}
+
+void TypeInfoBase::CalculateName()
+{
+ // name the type
+ SString sName;
+ typeHandle.GetName(sName);
+ StackScratchBuffer buffer;
+ const UTF8 *utf8 = sName.GetUTF8(buffer);
+ m_type_name = new char[strlen(utf8) + 1];
+ strcpy(m_type_name, utf8);
+}
+
+void TypeInfoBase::SetTypeHandle(TypeHandle handle)
+{
+ typeHandle = handle;
+ typeKey = handle.GetTypeKey();
+}
+
+TypeHandle TypeInfoBase::GetTypeHandle()
+{
+ return typeHandle;
+}
+
+TypeKey* TypeInfoBase::GetTypeKey()
+{
+ return &typeKey;
+}
+
+void PrimitiveTypeInfo::DumpDebugInfo(char* ptr, int& offset)
+{
+ if (m_type_offset != 0)
+ {
+ return;
+ }
+
+ if (ptr != nullptr)
+ {
+ DebugInfoType bufType;
+ bufType.m_type_abbrev = 2;
+ bufType.m_type_name = m_type_name_offset;
+ bufType.m_encoding = m_type_encoding;
+ bufType.m_byte_size = m_type_size;
+
+ memcpy(ptr + offset,
+ &bufType,
+ sizeof(DebugInfoType));
+ m_type_offset = offset;
+ }
+
+ offset += sizeof(DebugInfoType);
+}
+
+ClassTypeInfo::ClassTypeInfo(TypeHandle typeHandle, int num_members)
+ : TypeInfoBase(typeHandle),
+ m_num_members(num_members),
+ members(new TypeMember[num_members])
+{
+}
+
+ClassTypeInfo::~ClassTypeInfo()
+{
+ if (members != nullptr && m_num_members > 0)
+ {
+ delete[] members;
+ }
+}
+
+void TypeMember::DumpStrings(char* ptr, int& offset)
+{
+ if (ptr != nullptr)
+ {
+ strcpy(ptr + offset, m_member_name);
+ m_member_name_offset = offset;
+ }
+ offset += strlen(m_member_name) + 1;
+}
+
+void TypeMember::DumpDebugInfo(char* ptr, int& offset)
+{
+ if (ptr != nullptr)
+ {
+ DebugInfoClassMember memberEntry;
+
+ if (m_static_member_address == 0)
+ memberEntry.m_member_abbrev = 8;
+ else
+ {
+ memberEntry.m_member_abbrev = 14;
+ m_member_offset = offset;
+ }
+ memberEntry.m_member_name = m_member_name_offset;
+ memberEntry.m_member_type = m_member_type->m_type_offset;
+
+ memcpy(ptr + offset, &memberEntry, sizeof(DebugInfoClassMember));
+ if (m_static_member_address == 0)
+ memcpy(ptr + offset + sizeof(DebugInfoClassMember), &m_member_offset, sizeof(m_member_offset));
+ }
+ offset += sizeof(DebugInfoClassMember);
+ if (m_static_member_address == 0)
+ offset += sizeof(m_member_offset);
+}
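+
+/* Sketch of the resulting DIEs: an instance member (abbrev 8) is the packed
+   DebugInfoClassMember record followed by a 4-byte
+   DW_AT_data_member_location; a static member (abbrev 14) emits only the
+   record and remembers its own offset so the matching DW_TAG_variable
+   (abbrev 15) can reference it later via DW_AT_specification. */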
+
+void TypeMember::DumpStaticDebugInfo(char* ptr, int& offset)
+{
+ const int ptrSize = sizeof(TADDR);
+ if (ptr != nullptr)
+ {
+ DebugInfoStaticMember memberEntry;
+
+ memberEntry.m_member_abbrev = 15;
+ memberEntry.m_member_specification = m_member_offset;
+ memcpy(ptr + offset, &memberEntry, sizeof(DebugInfoStaticMember));
+
+ char buf[ptrSize + 2] = {0};
+ buf[0] = ptrSize + 1;
+ buf[1] = DW_OP_addr;
+
+ for (int i = 0; i < ptrSize; i++)
+ {
+ buf[i + 2] = m_static_member_address >> (i * 8);
+ }
+
+ memcpy(ptr + offset + sizeof(DebugInfoStaticMember), &buf, ptrSize + 2);
+ }
+ offset += sizeof(DebugInfoStaticMember);
+ offset += ptrSize + 2;
+}
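+
+/* Example of the emitted location (illustrative 64-bit address): for
+   m_static_member_address == 0x7f0012345678 the expression buffer is
+   { 0x09, DW_OP_addr, 78 56 34 12 00 7f 00 00 } -- a length byte of
+   ptrSize + 1 followed by the address in little-endian order. */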
+
+void FunctionMember::MangleName(char *buf, int &buf_offset, const char *name)
+{
+ int name_length = strlen(name);
+
+ char tmp[20];
+ int tmp_len = sprintf_s(tmp, _countof(tmp), "%i", name_length);
+ if (tmp_len <= 0)
+ return;
+
+ if (buf)
+ strncpy(buf + buf_offset, tmp, tmp_len);
+ buf_offset += tmp_len;
+
+ if (buf)
+ {
+ for (int i = 0; i < name_length; i++)
+ {
+ char c = name[i];
+ bool valid = (c >= 'a' && c <= 'z') ||
+ (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9');
+ *(buf + buf_offset + i) = valid ? c : '_';
+ }
+ }
+ buf_offset += name_length;
+}
+
+void FunctionMember::DumpMangledNamespaceAndMethod(char *buf, int &offset, const char *nspace, const char *mname)
+{
+ static const char *begin_mangled = "_ZN";
+ static const char *end_mangled = "Ev";
+ static const int begin_mangled_len = strlen(begin_mangled);
+ static const int end_mangled_len = strlen(end_mangled);
+
+ if (buf)
+ strncpy(buf + offset, begin_mangled, begin_mangled_len);
+ offset += begin_mangled_len;
+
+ MangleName(buf, offset, nspace);
+ MangleName(buf, offset, mname);
+
+ if (buf)
+ strncpy(buf + offset, end_mangled, end_mangled_len);
+ offset += end_mangled_len;
+
+ if (buf)
+ buf[offset] = '\0';
+ ++offset;
+}
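+
+/* Example (hypothetical names): DumpMangledNamespaceAndMethod(buf, off,
+   "Test.Program", "Main") yields "_ZN12Test_Program4MainEv" -- each name
+   is prefixed with its length and non-alphanumeric characters become '_',
+   mimicking the Itanium mangling of Test_Program::Main(). */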
+
+void FunctionMember::DumpLinkageName(char* ptr, int& offset)
+{
+ SString namespaceOrClassName;
+ SString methodName;
+
+ md->GetMethodInfoNoSig(namespaceOrClassName, methodName);
+ SString utf8namespaceOrClassName;
+ SString utf8methodName;
+ namespaceOrClassName.ConvertToUTF8(utf8namespaceOrClassName);
+ methodName.ConvertToUTF8(utf8methodName);
+
+ const char *nspace = utf8namespaceOrClassName.GetUTF8NoConvert();
+ const char *mname = utf8methodName.GetUTF8NoConvert();
+
+ if (!nspace || !mname)
+ {
+ m_linkage_name_offset = 0;
+ return;
+ }
+
+ m_linkage_name_offset = offset;
+ DumpMangledNamespaceAndMethod(ptr, offset, nspace, mname);
+}
+
+void FunctionMember::DumpStrings(char* ptr, int& offset)
+{
+ TypeMember::DumpStrings(ptr, offset);
+
+ for (int i = 0; i < m_num_vars; ++i)
+ {
+ vars[i].DumpStrings(ptr, offset);
+ }
+
+ DumpLinkageName(ptr, offset);
+}
+
+bool FunctionMember::GetBlockInNativeCode(int blockILOffset, int blockILLen, TADDR *startOffset, TADDR *endOffset)
+{
+ PCODE pCode = md->GetNativeCode();
+
+ const int blockILEnd = blockILOffset + blockILLen;
+
+ *startOffset = 0;
+ *endOffset = 0;
+
+ bool inBlock = false;
+
+ for (int i = 0; i < nlines; ++i)
+ {
+ TADDR nativeOffset = lines[i].nativeOffset + pCode;
+
+ // Limit block search to current function addresses
+ if (nativeOffset < m_sub_low_pc)
+ continue;
+ if (nativeOffset >= m_sub_low_pc + m_sub_high_pc)
+ break;
+
+ // Skip invalid IL offsets
+ switch(lines[i].ilOffset)
+ {
+ case ICorDebugInfo::PROLOG:
+ case ICorDebugInfo::EPILOG:
+ case ICorDebugInfo::NO_MAPPING:
+ continue;
+ default:
+ break;
+ }
+
+ // Check if current IL is within block
+ if (blockILOffset <= lines[i].ilOffset && lines[i].ilOffset < blockILEnd)
+ {
+ if (!inBlock)
+ {
+ *startOffset = lines[i].nativeOffset;
+ inBlock = true;
+ }
+ }
+ else
+ {
+ if (inBlock)
+ {
+ *endOffset = lines[i].nativeOffset;
+ inBlock = false;
+ break;
+ }
+ }
+ }
+
+ if (inBlock)
+ {
+ *endOffset = m_sub_low_pc + m_sub_high_pc - pCode;
+ }
+
+ return *endOffset != *startOffset;
+}
+
+void FunctionMember::DumpTryCatchBlock(char* ptr, int& offset, int ilOffset, int ilLen, int abbrev)
+{
+ TADDR startOffset;
+ TADDR endOffset;
+
+ if (!GetBlockInNativeCode(ilOffset, ilLen, &startOffset, &endOffset))
+ return;
+
+ if (ptr != nullptr)
+ {
+ DebugInfoTryCatchSub subEntry;
+
+ subEntry.m_sub_abbrev = abbrev;
+ subEntry.m_sub_low_pc = md->GetNativeCode() + startOffset;
+ subEntry.m_sub_high_pc = endOffset - startOffset;
+
+ memcpy(ptr + offset, &subEntry, sizeof(DebugInfoTryCatchSub));
+ }
+ offset += sizeof(DebugInfoTryCatchSub);
+}
+
+void FunctionMember::DumpTryCatchDebugInfo(char* ptr, int& offset)
+{
+ if (!md)
+ return;
+
+ COR_ILMETHOD *pHeader = md->GetILHeader();
+ COR_ILMETHOD_DECODER header(pHeader);
+
+ unsigned ehCount = header.EHCount();
+
+ for (unsigned e = 0; e < ehCount; e++)
+ {
+ IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT ehBuff;
+ const IMAGE_COR_ILMETHOD_SECT_EH_CLAUSE_FAT* ehInfo;
+
+ ehInfo = header.EH->EHClause(e, &ehBuff);
+
+ DumpTryCatchBlock(ptr, offset, ehInfo->TryOffset, ehInfo->TryLength, 16);
+ DumpTryCatchBlock(ptr, offset, ehInfo->HandlerOffset, ehInfo->HandlerLength, 17);
+ }
+}
+
+void FunctionMember::DumpDebugInfo(char* ptr, int& offset)
+{
+ if (ptr != nullptr)
+ {
+ DebugInfoSub subEntry;
+
+ subEntry.m_sub_abbrev = 4;
+ subEntry.m_sub_name = m_member_name_offset;
+ subEntry.m_linkage_name = m_linkage_name_offset;
+ subEntry.m_file = m_file;
+ subEntry.m_line = m_line;
+ subEntry.m_sub_type = m_member_type->m_type_offset;
+ subEntry.m_sub_low_pc = m_sub_low_pc;
+ subEntry.m_sub_high_pc = m_sub_high_pc;
+ subEntry.m_sub_loc[0] = m_sub_loc[0];
+ subEntry.m_sub_loc[1] = m_sub_loc[1];
+
+ if (!md->IsStatic())
+ {
+ DebugInfoSubMember subMemberEntry;
+ subEntry.m_sub_abbrev = 12;
+ subMemberEntry.sub = subEntry;
+ subMemberEntry.m_obj_ptr = offset+sizeof(DebugInfoSubMember);
+ memcpy(ptr + offset, &subMemberEntry, sizeof(DebugInfoSubMember));
+ }
+ else
+ {
+ memcpy(ptr + offset, &subEntry, sizeof(DebugInfoSub));
+ }
+ m_entry_offset = offset;
+ dumped = true;
+ }
+
+ if (!md->IsStatic())
+ {
+ offset += sizeof(DebugInfoSubMember);
+ }
+ else
+ {
+ offset += sizeof(DebugInfoSub);
+ }
+ for (int i = 0; i < m_num_vars; ++i)
+ {
+ vars[i].DumpDebugInfo(ptr, offset);
+ }
+
+ DumpTryCatchDebugInfo(ptr, offset);
+
+ // terminate children
+ if (ptr != nullptr)
+ {
+ ptr[offset] = 0;
+ }
+ offset++;
+}
+
+int FunctionMember::GetArgsAndLocalsLen()
+{
+ int locSize = 0;
+ char tmpBuf[16];
+
+ // Format for DWARF location expression: [expression length][operation][offset in SLEB128 encoding]
+ for (int i = 0; i < m_num_vars; i++)
+ {
+ locSize += 2; // First byte contains expression length, second byte contains operation (DW_OP_fbreg).
+ locSize += Leb128Encode(static_cast<int32_t>(vars[i].m_native_offset), tmpBuf, sizeof(tmpBuf));
+ }
+ return locSize;
+}
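+
+/* E.g. (illustrative offsets): two locals at frame offsets -8 and -129
+   need 2 + 1 and 2 + 2 bytes respectively, so GetArgsAndLocalsLen()
+   returns 7. */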
+
+void ClassTypeInfo::DumpStrings(char* ptr, int& offset)
+{
+ TypeInfoBase::DumpStrings(ptr, offset);
+
+ for (int i = 0; i < m_num_members; ++i)
+ {
+ members[i].DumpStrings(ptr, offset);
+ }
+}
+
+void RefTypeInfo::DumpStrings(char* ptr, int& offset)
+{
+ TypeInfoBase::DumpStrings(ptr, offset);
+ m_value_type->DumpStrings(ptr, offset);
+}
+
+void RefTypeInfo::DumpDebugInfo(char* ptr, int& offset)
+{
+ if (m_type_offset != 0)
+ {
+ return;
+ }
+ m_type_offset = offset;
+ offset += sizeof(DebugInfoRefType);
+ m_value_type->DumpDebugInfo(ptr, offset);
+ if (ptr != nullptr)
+ {
+ DebugInfoRefType refType;
+ refType.m_type_abbrev = 9;
+ refType.m_ref_type = m_value_type->m_type_offset;
+ refType.m_byte_size = m_type_size;
+ memcpy(ptr + m_type_offset, &refType, sizeof(DebugInfoRefType));
+ }
+ else
+ {
+ m_type_offset = 0;
+ }
+}
+void ClassTypeInfo::DumpDebugInfo(char* ptr, int& offset)
+{
+ if (m_type_offset != 0)
+ {
+ return;
+ }
+ // make sure that types of all members are dumped
+ for (int i = 0; i < m_num_members; ++i)
+ {
+ if (members[i].m_member_type->m_type_offset == 0 && members[i].m_member_type != this)
+ {
+ members[i].m_member_type->DumpDebugInfo(ptr, offset);
+ }
+ }
+
+ if (ptr != nullptr)
+ {
+ DebugInfoClassType bufType;
+ bufType.m_type_abbrev = 7;
+ bufType.m_type_name = m_type_name_offset;
+ bufType.m_byte_size = m_type_size;
+
+ memcpy(ptr + offset, &bufType, sizeof(DebugInfoClassType));
+ m_type_offset = offset;
+ }
+ offset += sizeof(DebugInfoClassType);
+
+ for (int i = 0; i < m_num_members; ++i)
+ {
+ members[i].DumpDebugInfo(ptr, offset);
+ }
+
+ for (int i = 0; i < method.GetCount(); ++i)
+ {
+ if (method[i]->md->GetMethodTable() == GetTypeHandle().GetMethodTable())
+ {
+            // This method belongs to the class being dumped, so emit it now, before terminating the member list.
+ method[i]->DumpDebugInfo(ptr, offset);
+ }
+ }
+
+ // members terminator
+ if (ptr != nullptr)
+ {
+ ptr[offset] = 0;
+ }
+ offset++;
+
+ for (int i = 0; i < m_num_members; ++i)
+ {
+ if (members[i].m_static_member_address != 0)
+ members[i].DumpStaticDebugInfo(ptr, offset);
+ }
+
+}
+
+void ArrayTypeInfo::DumpDebugInfo(char* ptr, int& offset)
+{
+ if (m_type_offset != 0)
+ {
+ return;
+ }
+ if (m_elem_type->m_type_offset == 0)
+ {
+ m_elem_type->DumpDebugInfo(ptr, offset);
+ }
+ if (ptr != nullptr)
+ {
+ DebugInfoArrayType arrType;
+
+ arrType.m_abbrev = 10; // DW_TAG_array_type abbrev
+ arrType.m_type = m_elem_type->m_type_offset;
+
+ memcpy(ptr + offset, &arrType, sizeof(DebugInfoArrayType));
+ m_type_offset = offset;
+ }
+ offset += sizeof(DebugInfoArrayType);
+
+ char tmp[16] = { 0 };
+ int len = Leb128Encode(static_cast<int32_t>(m_count_offset), tmp, sizeof(tmp));
+ if (ptr != nullptr)
+ {
+ char buf[64];
+ buf[0] = 11; // DW_TAG_subrange_type abbrev
+ buf[1] = len + 3;
+ buf[2] = DW_OP_push_object_address;
+ buf[3] = DW_OP_plus_uconst;
+ for (int j = 0; j < len; j++)
+ {
+ buf[j + 4] = tmp[j];
+ }
+ buf[len + 4] = DW_OP_deref;
+
+ memcpy(ptr + offset, buf, len + 5);
+ }
+ offset += (len + 5);
+
+ if (ptr != nullptr)
+ {
+ memset(ptr + offset, 0, 1);
+ }
+ offset += 1;
+}
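+
+/* Sketch of the emitted subrange (illustrative m_count_offset == 8):
+   { 0x0B (abbrev 11), 0x04, DW_OP_push_object_address, DW_OP_plus_uconst,
+   0x08, DW_OP_deref } -- the upper bound is loaded from the array
+   object's length field at offset 8. */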
+
+void VarDebugInfo::DumpStrings(char *ptr, int& offset)
+{
+ if (ptr != nullptr)
+ {
+ strcpy(ptr + offset, m_var_name);
+ m_var_name_offset = offset;
+ }
+ offset += strlen(m_var_name) + 1;
+}
+
+void VarDebugInfo::DumpDebugInfo(char* ptr, int& offset)
+{
+ char bufVarLoc[16];
+ int len = GetFrameLocation(m_native_offset, bufVarLoc);
+ if (ptr != nullptr)
+ {
+ DebugInfoVar bufVar;
+
+ bufVar.m_var_abbrev = m_var_abbrev;
+ bufVar.m_var_name = m_var_name_offset;
+ bufVar.m_var_file = 1;
+ bufVar.m_var_line = 1;
+ bufVar.m_var_type = m_var_type->m_type_offset;
+ memcpy(ptr + offset, &bufVar, sizeof(DebugInfoVar));
+ memcpy(ptr + offset + sizeof(DebugInfoVar), bufVarLoc, len);
+ }
+ offset += sizeof(DebugInfoVar);
+ offset += len;
+}
+
/* static data for symbol strings */
-const char* SymbolNames[] = {
- "", ""
+struct Elf_Symbol {
+ const char* m_name;
+ int m_off;
+ TADDR m_value;
+ int m_section, m_size;
+ bool m_releaseName;
+ Elf_Symbol() : m_name(nullptr), m_off(0), m_value(0), m_section(0), m_size(0), m_releaseName(false) {}
+ ~Elf_Symbol()
+ {
+ if (m_releaseName)
+ delete [] m_name;
+ }
};
+static int countFuncs(const SymbolsInfo *lines, int nlines)
+{
+ int count = 0;
+ for (int i = 0; i < nlines; i++) {
+ if (lines[i].ilOffset == ICorDebugInfo::PROLOG)
+ {
+ count++;
+ }
+ }
+ return count;
+}
+
+static int getNextPrologueIndex(int from, const SymbolsInfo *lines, int nlines)
+{
+ for (int i = from; i < nlines; ++i) {
+ if (lines[i].ilOffset == ICorDebugInfo::PROLOG)
+ {
+ return i;
+ }
+ }
+ return -1;
+}
+
+int SymbolCount = 0;
+NewArrayHolder<Elf_Symbol> SymbolNames;
+NotifyGdb::AddrSet codeAddrs;
/* Create ELF/DWARF debug info for jitted method */
void NotifyGdb::MethodCompiled(MethodDesc* MethodDescPtr)
{
PCODE pCode = MethodDescPtr->GetNativeCode();
-
if (pCode == NULL)
return;
unsigned int symInfoLen = 0;
NewArrayHolder<SymbolsInfo> symInfo = nullptr;
+ LocalsInfo locals;
/* Get method name & size of jitted code */
LPCUTF8 methodName = MethodDescPtr->GetName();
EECodeInfo codeInfo(pCode);
TADDR codeSize = codeInfo.GetCodeManager()->GetFunctionSize(codeInfo.GetGCInfoToken());
-
-#ifdef _TARGET_ARM_
- pCode &= ~1; // clear thumb flag for debug info
-#endif
+
+ pCode = PCODEToPINSTR(pCode);
/* Get module name */
const Module* mod = MethodDescPtr->GetMethodTable()->GetModule();
@@ -357,15 +1622,82 @@ void NotifyGdb::MethodCompiled(MethodDesc* MethodDescPtr)
}
if (isUserDebug == FALSE)
+ {
return;
+ }
+
+ NewHolder<TK_TypeInfoMap> pTypeMap = new TK_TypeInfoMap();
+
+ if (pTypeMap == nullptr)
+ {
+ return;
+ }
/* Get debug info for method from portable PDB */
- HRESULT hr = GetDebugInfoFromPDB(MethodDescPtr, &symInfo, symInfoLen);
+ HRESULT hr = GetDebugInfoFromPDB(MethodDescPtr, &symInfo, symInfoLen, locals);
if (FAILED(hr) || symInfoLen == 0)
{
return;
}
+ int method_count = countFuncs(symInfo, symInfoLen);
+ if (!method.Alloc(method_count)) {
+ return;
+ }
+
+ CodeHeader* pCH = (CodeHeader*)pCode - 1;
+ CalledMethod* pCalledMethods = reinterpret_cast<CalledMethod*>(pCH->GetCalledMethods());
+ /* Collect addresses of thunks called by method */
+ if (!CollectCalledMethods(pCalledMethods, (TADDR)MethodDescPtr->GetNativeCode()))
+ {
+ return;
+ }
+ pCH->SetCalledMethods(NULL);
+
+ MetaSig sig(MethodDescPtr);
+ int nArgsCount = sig.NumFixedArgs();
+ if (sig.HasThis())
+ nArgsCount++;
+
+ unsigned int firstLineIndex = 0;
+ for (;firstLineIndex < symInfoLen; firstLineIndex++) {
+ if (symInfo[firstLineIndex].lineNumber != 0 && symInfo[firstLineIndex].lineNumber != HiddenLine) break;
+ }
+
+ if (firstLineIndex >= symInfoLen)
+ {
+ return;
+ }
+
+ int start_index = getNextPrologueIndex(0, symInfo, symInfoLen);
+
+ for (int method_index = 0; method_index < method.GetCount(); ++method_index)
+ {
+ method[method_index] = new FunctionMember(MethodDescPtr, locals.size, nArgsCount);
+
+ int end_index = getNextPrologueIndex(start_index + 1, symInfo, symInfoLen);
+
+ PCODE method_start = symInfo[start_index].nativeOffset;
+ TADDR method_size = end_index == -1 ? codeSize - method_start : symInfo[end_index].nativeOffset - method_start;
+
+ // method return type
+ method[method_index]->m_member_type = GetArgTypeInfo(MethodDescPtr, pTypeMap, 0);
+ method[method_index]->GetLocalsDebugInfo(pTypeMap, locals, symInfo[firstLineIndex].nativeOffset);
+ method[method_index]->m_sub_low_pc = pCode + method_start;
+ method[method_index]->m_sub_high_pc = method_size;
+ size_t methodNameSize = strlen(methodName) + 10;
+ method[method_index]->m_member_name = new char[methodNameSize];
+ if (method_index == 0)
+ sprintf_s(method[method_index]->m_member_name, methodNameSize, "%s", methodName);
+ else
+ sprintf_s(method[method_index]->m_member_name, methodNameSize, "%s_%i", methodName, method_index);
+
+ // method's class
+ GetTypeInfoFromTypeHandle(TypeHandle(method[method_index]->md->GetMethodTable()), pTypeMap);
+
+ start_index = end_index;
+ }
+
MemBuf elfHeader, sectHeaders, sectStr, sectSymTab, sectStrTab, dbgInfo, dbgAbbrev, dbgPubname, dbgPubType, dbgLine,
dbgStr, elfFile;
@@ -374,42 +1706,52 @@ void NotifyGdb::MethodCompiled(MethodDesc* MethodDescPtr)
{
return;
}
-
+
/* Build .debug_line section */
- if (!BuildLineTable(dbgLine, pCode, symInfo, symInfoLen))
+ if (!BuildLineTable(dbgLine, pCode, codeSize, symInfo, symInfoLen))
{
return;
}
DebugStrings[1] = szModuleFile;
- DebugStrings[3] = methodName;
/* Build .debug_str section */
- if (!BuildDebugStrings(dbgStr))
+ if (!BuildDebugStrings(dbgStr, pTypeMap))
{
return;
}
/* Build .debug_info section */
- if (!BuildDebugInfo(dbgInfo))
+ if (!BuildDebugInfo(dbgInfo, pTypeMap, symInfo, symInfoLen))
{
return;
}
-
+
+ for (int i = 0; i < locals.size; i++)
+ {
+ delete[] locals.localsName[i];
+ }
/* Build .debug_pubname section */
- if (!BuildDebugPub(dbgPubname, methodName, dbgInfo.MemSize, 26))
+ if (!BuildDebugPub(dbgPubname, methodName, dbgInfo.MemSize, 0x28))
{
return;
}
/* Build debug_pubtype section */
- if (!BuildDebugPub(dbgPubType, "int", dbgInfo.MemSize, 37))
+ if (!BuildDebugPub(dbgPubType, "int", dbgInfo.MemSize, 0x1a))
{
return;
}
-
+
/* Build .strtab section */
- SymbolNames[1] = methodName;
+ SymbolNames[0].m_name = "";
+ for (int i = 0; i < method.GetCount(); ++i)
+ {
+ SymbolNames[1 + i].m_name = method[i]->m_member_name;
+ SymbolNames[1 + i].m_value = method[i]->m_sub_low_pc;
+ SymbolNames[1 + i].m_section = 1;
+ SymbolNames[1 + i].m_size = method[i]->m_sub_high_pc;
+ }
if (!BuildStringTableSection(sectStrTab))
{
return;
@@ -420,15 +1762,8 @@ void NotifyGdb::MethodCompiled(MethodDesc* MethodDescPtr)
return;
}
-
- /* Build section names section */
- if (!BuildSectionNameTable(sectStr))
- {
- return;
- }
-
- /* Build section headers table */
- if (!BuildSectionTable(sectHeaders))
+ /* Build section headers table and section names table */
+ if (!BuildSectionTables(sectHeaders, sectStr))
{
return;
}
@@ -470,13 +1805,21 @@ void NotifyGdb::MethodCompiled(MethodDesc* MethodDescPtr)
++pShdr; // .symtab
pShdr->sh_offset = offset;
pShdr->sh_size = sectSymTab.MemSize;
- pShdr->sh_link = 10;
+ pShdr->sh_link = GetSectionIndex(".strtab");
offset += sectSymTab.MemSize;
++pShdr; // .strtab
pShdr->sh_offset = offset;
pShdr->sh_size = sectStrTab.MemSize;
offset += sectStrTab.MemSize;
-
+
+ // .thunks
+ for (int i = 1 + method.GetCount(); i < SymbolCount; i++)
+ {
+ ++pShdr;
+ pShdr->sh_addr = PCODEToPINSTR(SymbolNames[i].m_value);
+ pShdr->sh_size = 8;
+ }
+
/* Build ELF header */
if (!BuildELFHeader(elfHeader))
{
@@ -493,8 +1836,9 @@ void NotifyGdb::MethodCompiled(MethodDesc* MethodDescPtr)
#endif
header->e_shoff = offset;
header->e_shentsize = sizeof(Elf_Shdr);
- header->e_shnum = SectionNamesCount - 1;
- header->e_shstrndx = 2;
+ int thunks_count = SymbolCount - method.GetCount() - 1;
+ header->e_shnum = SectionNamesCount + thunks_count;
+ header->e_shstrndx = GetSectionIndex(".shstrtab");
/* Build ELF image in memory */
elfFile.MemSize = elfHeader.MemSize + sectStr.MemSize + dbgStr.MemSize + dbgAbbrev.MemSize + dbgInfo.MemSize +
@@ -531,12 +1875,14 @@ void NotifyGdb::MethodCompiled(MethodDesc* MethodDescPtr)
memcpy(elfFile.MemPtr + offset, sectHeaders.MemPtr, sectHeaders.MemSize);
+ elfFile.MemPtr.SuppressRelease();
+
#ifdef GDBJIT_DUMPELF
DumpElf(methodName, elfFile);
-#endif
-
+#endif
+
/* Create GDB JIT structures */
- jit_code_entry* jit_symbols = new (nothrow) jit_code_entry;
+ NewHolder<jit_code_entry> jit_symbols = new (nothrow) jit_code_entry;
if (jit_symbols == nullptr)
{
@@ -557,15 +1903,21 @@ void NotifyGdb::MethodCompiled(MethodDesc* MethodDescPtr)
head->prev_entry = jit_symbols;
}
+ jit_symbols.SuppressRelease();
+
/* Notify the debugger */
__jit_debug_descriptor.relevant_entry = jit_symbols;
__jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
__jit_debug_register_code();
-
}
void NotifyGdb::MethodDropped(MethodDesc* MethodDescPtr)
{
+ static const int textSectionIndex = GetSectionIndex(".text");
+
+ if (textSectionIndex < 0)
+ return;
+
PCODE pCode = MethodDescPtr->GetNativeCode();
if (pCode == NULL)
@@ -579,7 +1931,7 @@ void NotifyGdb::MethodDropped(MethodDesc* MethodDescPtr)
const Elf_Ehdr* pEhdr = reinterpret_cast<const Elf_Ehdr*>(ptr);
const Elf_Shdr* pShdr = reinterpret_cast<const Elf_Shdr*>(ptr + pEhdr->e_shoff);
- ++pShdr; // bump to .text section
+ pShdr += textSectionIndex; // bump to .text section
if (pShdr->sh_addr == pCode)
{
/* Notify the debugger */
@@ -602,7 +1954,7 @@ void NotifyGdb::MethodDropped(MethodDesc* MethodDescPtr)
}
/* Build the DWARF .debug_line section */
-bool NotifyGdb::BuildLineTable(MemBuf& buf, PCODE startAddr, SymbolsInfo* lines, unsigned nlines)
+bool NotifyGdb::BuildLineTable(MemBuf& buf, PCODE startAddr, TADDR codeSize, SymbolsInfo* lines, unsigned nlines)
{
MemBuf fileTable, lineProg;
@@ -610,7 +1962,7 @@ bool NotifyGdb::BuildLineTable(MemBuf& buf, PCODE startAddr, SymbolsInfo* lines,
if (!BuildFileTable(fileTable, lines, nlines))
return false;
/* Build line info program */
- if (!BuildLineProg(lineProg, startAddr, lines, nlines))
+ if (!BuildLineProg(lineProg, startAddr, codeSize, lines, nlines))
{
return false;
}
@@ -640,7 +1992,7 @@ bool NotifyGdb::BuildLineTable(MemBuf& buf, PCODE startAddr, SymbolsInfo* lines,
/* Build the source files table for DWARF source line info */
bool NotifyGdb::BuildFileTable(MemBuf& buf, SymbolsInfo* lines, unsigned nlines)
{
- const char** files = nullptr;
+ NewArrayHolder<const char*> files = nullptr;
unsigned nfiles = 0;
/* GetValue file names and replace them with indices in file table */
@@ -649,6 +2001,8 @@ bool NotifyGdb::BuildFileTable(MemBuf& buf, SymbolsInfo* lines, unsigned nlines)
return false;
for (unsigned i = 0; i < nlines; ++i)
{
+ if (lines[i].fileName[0] == 0)
+ continue;
const char *filePath, *fileName;
SplitPathname(lines[i].fileName, filePath, fileName);
@@ -686,7 +2040,6 @@ bool NotifyGdb::BuildFileTable(MemBuf& buf, SymbolsInfo* lines, unsigned nlines)
if (buf.MemPtr == nullptr)
{
- delete[] files;
return false;
}
@@ -704,7 +2057,6 @@ bool NotifyGdb::BuildFileTable(MemBuf& buf, SymbolsInfo* lines, unsigned nlines)
// final zero byte
*ptr = 0;
- delete[] files;
return true;
}
@@ -742,44 +2094,77 @@ void NotifyGdb::IssueParamCommand(char*& ptr, uint8_t command, char* param, int
}
}
-/* Special command moves address, line number and issue one row to source line matrix */
-void NotifyGdb::IssueSpecialCommand(char*& ptr, int8_t line_shift, uint8_t addr_shift)
-{
- *ptr++ = (line_shift - DWARF_LINE_BASE) + addr_shift * DWARF_LINE_RANGE + DWARF_OPCODE_BASE;
-}
-
-/* Check to see if given shifts are fit into one byte command */
-bool NotifyGdb::FitIntoSpecialOpcode(int8_t line_shift, uint8_t addr_shift)
+static void fixLineMapping(SymbolsInfo* lines, unsigned nlines)
{
- unsigned opcode = (line_shift - DWARF_LINE_BASE) + addr_shift * DWARF_LINE_RANGE + DWARF_OPCODE_BASE;
-
- return opcode < 255;
+ // Fix EPILOGUE line mapping
+ int prevLine = 0;
+ for (int i = 0; i < nlines; ++i)
+ {
+ if (lines[i].lineNumber == HiddenLine)
+ continue;
+ if (lines[i].ilOffset == ICorDebugInfo::PROLOG) // will be fixed in next step
+ {
+ prevLine = 0;
+ }
+ else
+ {
+ if (lines[i].lineNumber == 0)
+ {
+ lines[i].lineNumber = prevLine;
+ }
+ else
+ {
+ prevLine = lines[i].lineNumber;
+ }
+ }
+ }
+ // Fix PROLOGUE line mapping
+ prevLine = lines[nlines - 1].lineNumber;
+ for (int i = nlines - 1; i >= 0; --i)
+ {
+ if (lines[i].lineNumber == HiddenLine)
+ continue;
+ if (lines[i].lineNumber == 0)
+ lines[i].lineNumber = prevLine;
+ else
+ prevLine = lines[i].lineNumber;
+ }
+ // Skip HiddenLines
+ for (int i = 0; i < nlines; ++i)
+ {
+ if (lines[i].lineNumber == HiddenLine)
+ {
+ lines[i].lineNumber = 0;
+ if (i + 1 < nlines && lines[i + 1].ilOffset == ICorDebugInfo::NO_MAPPING)
+ lines[i + 1].lineNumber = 0;
+ }
+ }
}
/* Build program for DWARF source line section */
-bool NotifyGdb::BuildLineProg(MemBuf& buf, PCODE startAddr, SymbolsInfo* lines, unsigned nlines)
+bool NotifyGdb::BuildLineProg(MemBuf& buf, PCODE startAddr, TADDR codeSize, SymbolsInfo* lines, unsigned nlines)
{
static char cnv_buf[16];
- /* reserve memory assuming worst case: one extended and one special plus advance line command for each line*/
- buf.MemSize = 3 + ADDRESS_SIZE /* initial set address command */
- + 1 /* set prolog end command */
+    /* reserve memory assuming worst case: set address, advance line command, set prologue/epilogue and copy for each line */
+ buf.MemSize =
+ 6 /* set file command */
+ nlines * 6 /* advance line commands */
- + nlines * (4 + ADDRESS_SIZE) /* 1 extended + 1 special command */
+ + nlines * (3 + ADDRESS_SIZE) /* set address commands */
+ + nlines * 1 /* set prologue end or epilogue begin commands */
+ + nlines * 1 /* copy commands */
+ + 6 /* advance PC command */
+ 3; /* end of sequence command */
buf.MemPtr = new (nothrow) char[buf.MemSize];
char* ptr = buf.MemPtr;
if (buf.MemPtr == nullptr)
return false;
-
- /* set absolute start address */
- IssueSetAddress(ptr, startAddr);
- IssueSimpleCommand(ptr, DW_LNS_set_prologue_end);
-
- int prevLine = 1, prevAddr = 0, prevFile = 0;
-
+
+ fixLineMapping(lines, nlines);
+
+ int prevLine = 1, prevFile = 0;
+
for (int i = 0; i < nlines; ++i)
{
/* different source file */
@@ -789,26 +2174,38 @@ bool NotifyGdb::BuildLineProg(MemBuf& buf, PCODE startAddr, SymbolsInfo* lines,
IssueParamCommand(ptr, DW_LNS_set_file, cnv_buf, len);
prevFile = lines[i].fileIndex;
}
- /* too big line number shift */
- if (lines[i].lineNumber - prevLine > (DWARF_LINE_BASE + DWARF_LINE_RANGE - 1))
- {
+
+        // GCC doesn't use the is_prologue_end flag to mark the first instruction after the prologue.
+        // Instead, it issues a line table entry for the first instruction of the prologue
+        // and another for the first instruction after it.
+        // To avoid confusing the debugger, don't emit a second row for the same address.
+ if (i > 0 && lines[i - 1].nativeOffset == lines[i].nativeOffset)
+ continue;
+
+ IssueSetAddress(ptr, startAddr + lines[i].nativeOffset);
+
+ if (lines[i].lineNumber != prevLine) {
int len = Leb128Encode(static_cast<int32_t>(lines[i].lineNumber - prevLine), cnv_buf, sizeof(cnv_buf));
IssueParamCommand(ptr, DW_LNS_advance_line, cnv_buf, len);
prevLine = lines[i].lineNumber;
}
- /* first try special opcode */
- if (FitIntoSpecialOpcode(lines[i].lineNumber - prevLine, lines[i].nativeOffset - prevAddr))
- IssueSpecialCommand(ptr, lines[i].lineNumber - prevLine, lines[i].nativeOffset - prevAddr);
- else
- {
- IssueSetAddress(ptr, startAddr + lines[i].nativeOffset);
- IssueSpecialCommand(ptr, lines[i].lineNumber - prevLine, 0);
- }
-
- prevLine = lines[i].lineNumber;
- prevAddr = lines[i].nativeOffset;
+
+ if (lines[i].ilOffset == ICorDebugInfo::EPILOG)
+ IssueSimpleCommand(ptr, DW_LNS_set_epilogue_begin);
+ else if (i > 0 && lines[i - 1].ilOffset == ICorDebugInfo::PROLOG)
+ IssueSimpleCommand(ptr, DW_LNS_set_prologue_end);
+
+ IssueParamCommand(ptr, DW_LNS_copy, NULL, 0);
}
-
+
+ int lastAddr = nlines > 0 ? lines[nlines - 1].nativeOffset : 0;
+
+ // Advance PC to the end of function
+ if (lastAddr < codeSize) {
+ int len = Leb128Encode(static_cast<uint32_t>(codeSize - lastAddr), cnv_buf, sizeof(cnv_buf));
+ IssueParamCommand(ptr, DW_LNS_advance_pc, cnv_buf, len);
+ }
+
IssueEndOfSequence(ptr);
buf.MemSize = ptr - buf.MemPtr;
@@ -816,16 +2213,31 @@ bool NotifyGdb::BuildLineProg(MemBuf& buf, PCODE startAddr, SymbolsInfo* lines,
}
/* Build the DWARF .debug_str section */
-bool NotifyGdb::BuildDebugStrings(MemBuf& buf)
+bool NotifyGdb::BuildDebugStrings(MemBuf& buf, PTK_TypeInfoMap pTypeMap)
{
- uint32_t totalLength = 0;
-
+ int totalLength = 0;
+
/* calculate total section size */
for (int i = 0; i < DebugStringCount; ++i)
{
totalLength += strlen(DebugStrings[i]) + 1;
}
-
+
+ for (int i = 0; i < method.GetCount(); ++i)
+ {
+ method[i]->DumpStrings(nullptr, totalLength);
+ }
+
+ {
+ auto iter = pTypeMap->Begin();
+ while (iter != pTypeMap->End())
+ {
+ TypeInfoBase *typeInfo = iter->Value();
+ typeInfo->DumpStrings(nullptr, totalLength);
+ iter++;
+ }
+ }
+
buf.MemSize = totalLength;
buf.MemPtr = new (nothrow) char[totalLength];
@@ -834,12 +2246,28 @@ bool NotifyGdb::BuildDebugStrings(MemBuf& buf)
/* copy strings */
char* bufPtr = buf.MemPtr;
+ int offset = 0;
for (int i = 0; i < DebugStringCount; ++i)
{
- strcpy(bufPtr, DebugStrings[i]);
- bufPtr += strlen(DebugStrings[i]) + 1;
+ strcpy(bufPtr + offset, DebugStrings[i]);
+ offset += strlen(DebugStrings[i]) + 1;
}
-
+
+ for (int i = 0; i < method.GetCount(); ++i)
+ {
+ method[i]->DumpStrings(bufPtr, offset);
+ }
+
+ {
+ auto iter = pTypeMap->Begin();
+ while (iter != pTypeMap->End())
+ {
+ TypeInfoBase *typeInfo = iter->Value();
+ typeInfo->DumpStrings(bufPtr, offset);
+ iter++;
+ }
+ }
+
return true;
}
@@ -848,7 +2276,7 @@ bool NotifyGdb::BuildDebugAbbrev(MemBuf& buf)
{
buf.MemPtr = new (nothrow) char[AbbrevTableSize];
buf.MemSize = AbbrevTableSize;
-
+
if (buf.MemPtr == nullptr)
return false;
@@ -857,31 +2285,79 @@ bool NotifyGdb::BuildDebugAbbrev(MemBuf& buf)
}
/* Build the DWARF .debug_info section */
-bool NotifyGdb::BuildDebugInfo(MemBuf& buf)
+bool NotifyGdb::BuildDebugInfo(MemBuf& buf, PTK_TypeInfoMap pTypeMap, SymbolsInfo* lines, unsigned nlines)
{
- buf.MemSize = sizeof(DwarfCompUnit) + sizeof(DebugInfo) + 1;
+ int totalTypeVarSubSize = 0;
+ {
+ auto iter = pTypeMap->Begin();
+ while (iter != pTypeMap->End())
+ {
+ TypeInfoBase *typeInfo = iter->Value();
+ typeInfo->DumpDebugInfo(nullptr, totalTypeVarSubSize);
+ iter++;
+ }
+ }
+
+ for (int i = 0; i < method.GetCount(); ++i)
+ {
+ method[i]->lines = lines;
+ method[i]->nlines = nlines;
+ method[i]->DumpDebugInfo(nullptr, totalTypeVarSubSize);
+ }
+ // Drop pointers to lines when exiting current scope
+ struct DropMethodLines
+ {
+ ~DropMethodLines()
+ {
+ for (int i = 0; i < method.GetCount(); ++i)
+ {
+ method[i]->lines = nullptr;
+ method[i]->nlines = 0;
+ }
+ }
+ } dropMethodLines;
+
+ buf.MemSize = sizeof(DwarfCompUnit) + sizeof(DebugInfoCU) + totalTypeVarSubSize + 2;
buf.MemPtr = new (nothrow) char[buf.MemSize];
if (buf.MemPtr == nullptr)
return false;
-
+ int offset = 0;
    /* Compilation unit header */
DwarfCompUnit* cu = reinterpret_cast<DwarfCompUnit*>(buf.MemPtr.GetValue());
cu->m_length = buf.MemSize - sizeof(uint32_t);
cu->m_version = 4;
cu->m_abbrev_offset = 0;
cu->m_addr_size = ADDRESS_SIZE;
-
- /* copy debug information */
- DebugInfo* di = reinterpret_cast<DebugInfo*>(buf.MemPtr + sizeof(DwarfCompUnit));
- memcpy(buf.MemPtr + sizeof(DwarfCompUnit), &debugInfo, sizeof(DebugInfo));
- di->m_prod_off = 0;
- di->m_cu_name = strlen(DebugStrings[0]) + 1;
- di->m_sub_name = strlen(DebugStrings[0]) + 1 + strlen(DebugStrings[1]) + 1 + strlen(DebugStrings[2]) + 1;
- di->m_type_name = strlen(DebugStrings[0]) + 1 + strlen(DebugStrings[1]) + 1 + strlen(DebugStrings[2]) + 1 + strlen(DebugStrings[3]) + 1;
-
- /* zero end marker */
- buf.MemPtr[buf.MemSize-1] = 0;
+ offset += sizeof(DwarfCompUnit);
+ DebugInfoCU* diCU =
+ reinterpret_cast<DebugInfoCU*>(buf.MemPtr + offset);
+ memcpy(buf.MemPtr + offset, &debugInfoCU, sizeof(DebugInfoCU));
+ offset += sizeof(DebugInfoCU);
+ diCU->m_prod_off = 0;
+ diCU->m_cu_name = strlen(DebugStrings[0]) + 1;
+ {
+ auto iter = pTypeMap->Begin();
+ while (iter != pTypeMap->End())
+ {
+ TypeInfoBase *typeInfo = iter->Value();
+ typeInfo->DumpDebugInfo(buf.MemPtr, offset);
+ iter++;
+ }
+ }
+ for (int i = 0; i < method.GetCount(); ++i)
+ {
+ if (!method[i]->IsDumped())
+ {
+ method[i]->DumpDebugInfo(buf.MemPtr, offset);
+ }
+ else
+ {
+ method[i]->DumpDebugInfo(buf.MemPtr, method[i]->m_entry_offset);
+ }
+ }
+ memset(buf.MemPtr + offset, 0, buf.MemSize - offset);
return true;
}
@@ -908,12 +2384,60 @@ bool NotifyGdb::BuildDebugPub(MemBuf& buf, const char* name, uint32_t size, uint
return true;
}
+/* Store addresses and names of the called methods into symbol table */
+bool NotifyGdb::CollectCalledMethods(CalledMethod* pCalledMethods, TADDR nativeCode)
+{
+ AddrSet tmpCodeAddrs;
+
+ if (!codeAddrs.Contains(nativeCode))
+ codeAddrs.Add(nativeCode);
+
+ CalledMethod* pList = pCalledMethods;
+
+    /* count unique called methods */
+ while (pList != NULL)
+ {
+ TADDR callAddr = (TADDR)pList->GetCallAddr();
+ if (!tmpCodeAddrs.Contains(callAddr) && !codeAddrs.Contains(callAddr)) {
+ tmpCodeAddrs.Add(callAddr);
+ }
+ pList = pList->GetNext();
+ }
+
+ SymbolCount = 1 + method.GetCount() + tmpCodeAddrs.GetCount();
+ SymbolNames = new (nothrow) Elf_Symbol[SymbolCount];
+
+ pList = pCalledMethods;
+ int i = 1 + method.GetCount();
+ while (i < SymbolCount && pList != NULL)
+ {
+ TADDR callAddr = (TADDR)pList->GetCallAddr();
+ if (!codeAddrs.Contains(callAddr))
+ {
+ MethodDesc* pMD = pList->GetMethodDesc();
+ LPCUTF8 methodName = pMD->GetName();
+ int symbolNameLength = strlen(methodName) + sizeof("__thunk_");
+ SymbolNames[i].m_name = new char[symbolNameLength];
+ SymbolNames[i].m_releaseName = true;
+ sprintf_s((char*)SymbolNames[i].m_name, symbolNameLength, "__thunk_%s", methodName);
+ SymbolNames[i].m_value = callAddr;
+ ++i;
+ codeAddrs.Add(callAddr);
+ }
+ CalledMethod* ptr = pList;
+ pList = pList->GetNext();
+ delete ptr;
+ }
+ SymbolCount = i;
+ return true;
+}
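+
+/* E.g. (hypothetical callees): a method calling Foo() and Bar() through
+   thunks gains the symbols "__thunk_Foo" and "__thunk_Bar" after the
+   per-method entries, and SymbolCount is trimmed to the slots actually
+   filled. */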
+
/* Build ELF .strtab section */
bool NotifyGdb::BuildStringTableSection(MemBuf& buf)
{
int len = 0;
- for (int i = 0; i < sizeof(SymbolNames) / sizeof(SymbolNames[0]); ++i)
- len += strlen(SymbolNames[i]) + 1;
+ for (int i = 0; i < SymbolCount; ++i)
+ len += strlen(SymbolNames[i].m_name) + 1;
len++; // end table with zero-length string
buf.MemSize = len;
@@ -921,10 +2445,11 @@ bool NotifyGdb::BuildStringTableSection(MemBuf& buf)
if (buf.MemPtr == nullptr)
return false;
char* ptr = buf.MemPtr;
- for (int i = 0; i < sizeof(SymbolNames) / sizeof(SymbolNames[0]); ++i)
+ for (int i = 0; i < SymbolCount; ++i)
{
- strcpy(ptr, SymbolNames[i]);
- ptr += strlen(SymbolNames[i]) + 1;
+ SymbolNames[i].m_off = ptr - buf.MemPtr;
+ strcpy(ptr, SymbolNames[i].m_name);
+ ptr += strlen(SymbolNames[i].m_name) + 1;
}
buf.MemPtr[buf.MemSize-1] = 0;
@@ -934,107 +2459,136 @@ bool NotifyGdb::BuildStringTableSection(MemBuf& buf)
/* Build ELF .symtab section */
bool NotifyGdb::BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize)
{
- buf.MemSize = 2 * sizeof(Elf_Sym);
+ static const int textSectionIndex = GetSectionIndex(".text");
+
+ buf.MemSize = SymbolCount * sizeof(Elf_Sym);
buf.MemPtr = new (nothrow) char[buf.MemSize];
if (buf.MemPtr == nullptr)
return false;
Elf_Sym *sym = reinterpret_cast<Elf_Sym*>(buf.MemPtr.GetValue());
- sym->st_name = 0;
- sym->st_info = 0;
- sym->st_other = 0;
- sym->st_value = 0;
- sym->st_size = 0;
- sym->st_shndx = SHN_UNDEF;
-
- sym++;
- //sym = reinterpret_cast<Elf_Sym*>(buf.MemPtr.GetValue() + sizeof(Elf_Sym));
- sym->st_name = 1;
- sym->setBindingAndType(STB_GLOBAL, STT_FUNC);
- sym->st_other = 0;
-#ifdef _TARGET_ARM_
- sym->st_value = 1; // for THUMB code
-#else
- sym->st_value = 0;
-#endif
- sym->st_shndx = 1; // .text section index
- sym->st_size = codeSize;
- return true;
-}
-/* Build ELF string section */
-bool NotifyGdb::BuildSectionNameTable(MemBuf& buf)
-{
- uint32_t totalLength = 0;
+ sym[0].st_name = 0;
+ sym[0].st_info = 0;
+ sym[0].st_other = 0;
+ sym[0].st_value = 0;
+ sym[0].st_size = 0;
+ sym[0].st_shndx = SHN_UNDEF;
- /* calculate total size */
- for (int i = 0; i < SectionNamesCount; ++i)
+ for (int i = 1; i < 1 + method.GetCount(); ++i)
{
- totalLength += strlen(SectionNames[i]) + 1;
+ sym[i].st_name = SymbolNames[i].m_off;
+ sym[i].setBindingAndType(STB_GLOBAL, STT_FUNC);
+ sym[i].st_other = 0;
+ sym[i].st_value = PINSTRToPCODE(SymbolNames[i].m_value - addr);
+ sym[i].st_shndx = textSectionIndex;
+ sym[i].st_size = SymbolNames[i].m_size;
}
- buf.MemSize = totalLength;
- buf.MemPtr = new (nothrow) char[totalLength];
- if (buf.MemPtr == nullptr)
- return false;
-
- /* copy strings */
- char* bufPtr = buf.MemPtr;
- for (int i = 0; i < SectionNamesCount; ++i)
+ for (int i = 1 + method.GetCount(); i < SymbolCount; ++i)
{
- strcpy(bufPtr, SectionNames[i]);
- bufPtr += strlen(SectionNames[i]) + 1;
+ sym[i].st_name = SymbolNames[i].m_off;
+ sym[i].setBindingAndType(STB_GLOBAL, STT_FUNC);
+ sym[i].st_other = 0;
+ sym[i].st_shndx = SectionNamesCount + (i - (1 + method.GetCount())); // .thunks section index
+ sym[i].st_size = 8;
+#ifdef _TARGET_ARM_
+ sym[i].st_value = 1; // for THUMB code
+#else
+ sym[i].st_value = 0;
+#endif
}
-
return true;
}
-/* Build the ELF section headers table */
-bool NotifyGdb::BuildSectionTable(MemBuf& buf)
+int NotifyGdb::GetSectionIndex(const char *sectName)
{
- Elf_Shdr* sectionHeaders = new (nothrow) Elf_Shdr[SectionNamesCount - 1];
- Elf_Shdr* pSh = sectionHeaders;
+ for (int i = 0; i < SectionNamesCount; ++i)
+ if (strcmp(SectionNames[i], sectName) == 0)
+ return i;
+ return -1;
+}
+
+/* Build the ELF section headers table and section names table */
+bool NotifyGdb::BuildSectionTables(MemBuf& sectBuf, MemBuf& strBuf)
+{
+ static const int symtabSectionIndex = GetSectionIndex(".symtab");
+ static const int nullSectionIndex = GetSectionIndex("");
+
+ const int thunks_count = SymbolCount - 1 - method.GetCount();
+ // Approximate length of single section name.
+ // Used only to reduce memory reallocations.
+ static const int SECT_NAME_LENGTH = 11;
+
+ if (!strBuf.Resize(SECT_NAME_LENGTH * (SectionNamesCount + thunks_count)))
+ {
+ return false;
+ }
+
+ Elf_Shdr* sectionHeaders = new (nothrow) Elf_Shdr[SectionNamesCount + thunks_count];
if (sectionHeaders == nullptr)
{
return false;
}
-
- /* NULL entry */
- pSh->sh_name = 0;
- pSh->sh_type = SHT_NULL;
- pSh->sh_flags = 0;
- pSh->sh_addr = 0;
- pSh->sh_offset = 0;
- pSh->sh_size = 0;
- pSh->sh_link = SHN_UNDEF;
- pSh->sh_info = 0;
- pSh->sh_addralign = 0;
- pSh->sh_entsize = 0;
-
- ++pSh;
- /* fill section header data */
- uint32_t sectNameOffset = 1;
- for (int i = 1; i < SectionNamesCount - 1; ++i, ++pSh)
+
+ sectBuf.MemPtr = reinterpret_cast<char*>(sectionHeaders);
+ sectBuf.MemSize = sizeof(Elf_Shdr) * (SectionNamesCount + thunks_count);
+
+ Elf_Shdr* pSh = sectionHeaders;
+ uint32_t sectNameOffset = 0;
+
+    // Additional memory for the remaining section names,
+    // doubled on each reallocation.
+ int addSize = SECT_NAME_LENGTH;
+
+ // Fill section headers and names
+ for (int i = 0; i < SectionNamesCount + thunks_count; ++i, ++pSh)
{
+ char thunkSectNameBuf[256]; // temporary buffer for .thunk_# section name
+ const char *sectName;
+
+ bool isThunkSection = i >= SectionNamesCount;
+ if (isThunkSection)
+ {
+ sprintf_s(thunkSectNameBuf, _countof(thunkSectNameBuf), ".thunk_%i", i);
+ sectName = thunkSectNameBuf;
+ }
+ else
+ {
+ sectName = SectionNames[i];
+ }
+
+ // Ensure that there is enough memory for section name,
+ // reallocate if necessary.
pSh->sh_name = sectNameOffset;
- sectNameOffset += strlen(SectionNames[i]) + 1;
- pSh->sh_type = Sections[i].m_type;
- pSh->sh_flags = Sections[i].m_flags;
+ sectNameOffset += strlen(sectName) + 1;
+ if (sectNameOffset > strBuf.MemSize)
+ {
+ // Allocate more memory for remaining section names
+ if (!strBuf.Resize(sectNameOffset + addSize))
+ return false;
+ addSize *= 2;
+ }
+
+ strcpy(strBuf.MemPtr + pSh->sh_name, sectName);
+
+ // All .thunk_* sections have the same type and flags
+ int index = isThunkSection ? SectionNamesCount : i;
+ pSh->sh_type = Sections[index].m_type;
+ pSh->sh_flags = Sections[index].m_flags;
+
pSh->sh_addr = 0;
pSh->sh_offset = 0;
pSh->sh_size = 0;
pSh->sh_link = SHN_UNDEF;
pSh->sh_info = 0;
- pSh->sh_addralign = 1;
- if (strcmp(SectionNames[i], ".symtab") == 0)
- pSh->sh_entsize = sizeof(Elf_Sym);
- else
- pSh->sh_entsize = 0;
+ pSh->sh_addralign = i == nullSectionIndex ? 0 : 1;
+ pSh->sh_entsize = i == symtabSectionIndex ? sizeof(Elf_Sym) : 0;
}
- buf.MemPtr = reinterpret_cast<char*>(sectionHeaders);
- buf.MemSize = sizeof(Elf_Shdr) * (SectionNamesCount - 1);
+ // Set actual used size to avoid garbage in ELF section
+ strBuf.MemSize = sectNameOffset;
return true;
}
@@ -1042,17 +2596,18 @@ bool NotifyGdb::BuildSectionTable(MemBuf& buf)
bool NotifyGdb::BuildELFHeader(MemBuf& buf)
{
Elf_Ehdr* header = new (nothrow) Elf_Ehdr;
- buf.MemPtr = reinterpret_cast<char*>(header);
- buf.MemSize = sizeof(Elf_Ehdr);
-
+
if (header == nullptr)
+ {
return false;
-
+ }
+
+ buf.MemPtr = reinterpret_cast<char*>(header);
+ buf.MemSize = sizeof(Elf_Ehdr);
return true;
-
}
-/* Split full path name into directory & file anmes */
+/* Split full path name into directory & file names */
void NotifyGdb::SplitPathname(const char* path, const char*& pathName, const char*& fileName)
{
char* pSlash = strrchr(path, '/');
@@ -1070,47 +2625,6 @@ void NotifyGdb::SplitPathname(const char* path, const char*& pathName, const cha
}
}
-/* LEB128 for 32-bit unsigned integer */
-int NotifyGdb::Leb128Encode(uint32_t num, char* buf, int size)
-{
- int i = 0;
-
- do
- {
- uint8_t byte = num & 0x7F;
- if (i >= size)
- break;
- num >>= 7;
- if (num != 0)
- byte |= 0x80;
- buf[i++] = byte;
- }
- while (num != 0);
-
- return i;
-}
-
-/* LEB128 for 32-bit signed integer */
-int NotifyGdb::Leb128Encode(int32_t num, char* buf, int size)
-{
- int i = 0;
- bool hasMore = true, isNegative = num < 0;
-
- while (hasMore && i < size)
- {
- uint8_t byte = num & 0x7F;
- num >>= 7;
-
- if ((num == 0 && (byte & 0x40) == 0) || (num == -1 && (byte & 0x40) == 0x40))
- hasMore = false;
- else
- byte |= 0x80;
- buf[i++] = byte;
- }
-
- return i;
-}
-
#ifdef _DEBUG
void NotifyGdb::DumpElf(const char* methodName, const MemBuf& elfFile)
{
diff --git a/src/vm/gdbjit.h b/src/vm/gdbjit.h
index 467a970c0b..3160eccf57 100644
--- a/src/vm/gdbjit.h
+++ b/src/vm/gdbjit.h
@@ -16,6 +16,7 @@
#include <stdint.h>
#include "method.hpp"
+#include "dbginterface.h"
#include "../inc/llvm/ELF.h"
#include "../inc/llvm/Dwarf.h"
@@ -33,6 +34,46 @@
#error "Target is not supported"
#endif
+
+static constexpr const int CorElementTypeToDWEncoding[] =
+{
+/* ELEMENT_TYPE_END */ 0,
+/* ELEMENT_TYPE_VOID */ DW_ATE_address,
+/* ELEMENT_TYPE_BOOLEAN */ DW_ATE_boolean,
+/* ELEMENT_TYPE_CHAR */ DW_ATE_UTF,
+/* ELEMENT_TYPE_I1 */ DW_ATE_signed,
+/* ELEMENT_TYPE_U1 */ DW_ATE_unsigned,
+/* ELEMENT_TYPE_I2 */ DW_ATE_signed,
+/* ELEMENT_TYPE_U2 */ DW_ATE_unsigned,
+/* ELEMENT_TYPE_I4 */ DW_ATE_signed,
+/* ELEMENT_TYPE_U4 */ DW_ATE_unsigned,
+/* ELEMENT_TYPE_I8 */ DW_ATE_signed,
+/* ELEMENT_TYPE_U8 */ DW_ATE_unsigned,
+/* ELEMENT_TYPE_R4 */ DW_ATE_float,
+/* ELEMENT_TYPE_R8 */ DW_ATE_float,
+/* ELEMENT_TYPE_STRING */ DW_ATE_address,
+/* ELEMENT_TYPE_PTR */ DW_ATE_address,
+/* ELEMENT_TYPE_BYREF */ DW_ATE_address,
+/* ELEMENT_TYPE_VALUETYPE */ DW_ATE_address,
+/* ELEMENT_TYPE_CLASS */ DW_ATE_address,
+/* ELEMENT_TYPE_VAR */ DW_ATE_address,
+/* ELEMENT_TYPE_ARRAY */ DW_ATE_address,
+/* ELEMENT_TYPE_GENERICINST */ DW_ATE_address,
+/* ELEMENT_TYPE_TYPEDBYREF */ DW_ATE_address,
+/* SKIP 17 */ DW_ATE_address,
+/* ELEMENT_TYPE_I */ DW_ATE_signed,
+/* ELEMENT_TYPE_U */ DW_ATE_unsigned,
+/* SKIP 1a */ DW_ATE_address,
+/* ELEMENT_TYPE_FNPTR */ DW_ATE_address,
+/* ELEMENT_TYPE_OBJECT */ DW_ATE_address,
+/* ELEMENT_TYPE_SZARRAY */ DW_ATE_address,
+/* ELEMENT_TYPE_MVAR */ DW_ATE_address,
+/* ELEMENT_TYPE_CMOD_REQD */ DW_ATE_address,
+/* ELEMENT_TYPE_CMOD_OPT */ DW_ATE_address,
+/* ELEMENT_TYPE_INTERNAL */ DW_ATE_address,
+/* ELEMENT_TYPE_MAX */ DW_ATE_address,
+};
+
struct __attribute__((packed)) DwarfCompUnit
{
uint32_t m_length;
@@ -64,51 +105,374 @@ struct __attribute__((packed)) DwarfLineNumHeader
uint8_t m_std_num_arg[DW_LNS_MAX];
};
+const ULONG32 HiddenLine = 0x00feefee;
+
struct SymbolsInfo
{
int lineNumber, ilOffset, nativeOffset, fileIndex;
char fileName[2*MAX_PATH_FNAME];
+ ICorDebugInfo::SourceTypes source;
+};
+
+class DwarfDumpable
+{
+public:
+ // writes all string literals this type needs to ptr
+ virtual void DumpStrings(char* ptr, int& offset) = 0;
+
+ virtual void DumpDebugInfo(char* ptr, int& offset) = 0;
+};
+
+class LocalsInfo
+{
+public:
+ int size;
+ char** localsName;
+ ULONG32 countVars;
+ ICorDebugInfo::NativeVarInfo *pVars;
+};
+
+class TypeMember;
+
+class TypeInfoBase : public DwarfDumpable
+{
+public:
+ TypeInfoBase(TypeHandle typeHandle)
+ : m_type_name(nullptr),
+ m_type_name_offset(0),
+ m_type_size(0),
+ m_type_offset(0),
+ typeHandle(typeHandle),
+ typeKey(typeHandle.GetTypeKey())
+ {
+ }
+
+ virtual ~TypeInfoBase()
+ {
+ if (m_type_name != nullptr)
+ {
+ delete[] m_type_name;
+ }
+ }
+
+ virtual void DumpStrings(char* ptr, int& offset) override;
+ void CalculateName();
+ void SetTypeHandle(TypeHandle handle);
+ TypeHandle GetTypeHandle();
+ TypeKey* GetTypeKey();
+
+ char* m_type_name;
+ int m_type_name_offset;
+ ULONG m_type_size;
+ int m_type_offset;
+private:
+ TypeHandle typeHandle;
+ TypeKey typeKey;
+};
+
+class PrimitiveTypeInfo: public TypeInfoBase
+{
+public:
+ PrimitiveTypeInfo(TypeHandle typeHandle, int encoding)
+ : TypeInfoBase(typeHandle),
+ m_type_encoding(encoding)
+ {
+ }
+
+ void DumpDebugInfo(char* ptr, int& offset) override;
+
+ int m_type_encoding;
+};
+
+class RefTypeInfo: public TypeInfoBase
+{
+public:
+ RefTypeInfo(TypeHandle typeHandle, TypeInfoBase *value_type)
+ : TypeInfoBase(typeHandle),
+ m_value_type(value_type)
+ {
+ }
+ void DumpStrings(char* ptr, int& offset) override;
+ void DumpDebugInfo(char* ptr, int& offset) override;
+ TypeInfoBase *m_value_type;
+};
+
+class ClassTypeInfo: public TypeInfoBase
+{
+public:
+ ClassTypeInfo(TypeHandle typeHandle, int num_members);
+ ~ClassTypeInfo();
+
+ void DumpStrings(char* ptr, int& offset) override;
+ void DumpDebugInfo(char* ptr, int& offset) override;
+
+ int m_num_members;
+ TypeMember* members;
+};
+
+class TypeMember: public DwarfDumpable
+{
+public:
+ TypeMember()
+ : m_member_name(nullptr),
+ m_member_name_offset(0),
+ m_member_offset(0),
+ m_static_member_address(0),
+ m_member_type(nullptr)
+ {
+ }
+
+ ~TypeMember()
+ {
+ if (m_member_name != nullptr)
+ {
+ delete[] m_member_name;
+ }
+ }
+
+ void DumpStrings(char* ptr, int& offset) override;
+ void DumpDebugInfo(char* ptr, int& offset) override;
+ void DumpStaticDebugInfo(char* ptr, int& offset);
+
+ char* m_member_name;
+ int m_member_name_offset;
+ int m_member_offset;
+ TADDR m_static_member_address;
+ TypeInfoBase *m_member_type;
};
+class ArrayTypeInfo: public TypeInfoBase
+{
+public:
+ ArrayTypeInfo(TypeHandle typeHandle, int countOffset, TypeInfoBase* elemType)
+ : TypeInfoBase(typeHandle),
+ m_count_offset(countOffset),
+ m_elem_type(elemType)
+ {
+ }
+
+ ~ArrayTypeInfo()
+ {
+ if (m_elem_type != nullptr)
+ {
+ delete m_elem_type;
+ }
+ }
+
+ void DumpDebugInfo(char* ptr, int& offset) override;
+
+ int m_count_offset;
+ TypeInfoBase *m_elem_type;
+};
+
+class VarDebugInfo: public DwarfDumpable
+{
+public:
+ VarDebugInfo(int abbrev)
+ : m_var_name(nullptr),
+ m_var_abbrev(abbrev),
+ m_var_name_offset(0),
+ m_il_index(0),
+ m_native_offset(0),
+ m_var_type(nullptr)
+ {
+ }
+
+ VarDebugInfo()
+ : m_var_name(nullptr),
+ m_var_abbrev(6),
+ m_var_name_offset(0),
+ m_il_index(0),
+ m_native_offset(0),
+ m_var_type(nullptr)
+ {
+ }
+
+ virtual ~VarDebugInfo()
+ {
+ delete[] m_var_name;
+ }
+
+ void DumpStrings(char* ptr, int& offset) override;
+ void DumpDebugInfo(char* ptr, int& offset) override;
+
+ char* m_var_name;
+ int m_var_abbrev;
+ int m_var_name_offset;
+ int m_il_index;
+ int m_native_offset;
+ TypeInfoBase *m_var_type;
+};
class NotifyGdb
{
public:
static void MethodCompiled(MethodDesc* MethodDescPtr);
static void MethodDropped(MethodDesc* MethodDescPtr);
+ template <typename PARENT_TRAITS>
+ class DeleteValuesOnDestructSHashTraits : public PARENT_TRAITS
+ {
+ public:
+ static inline void OnDestructPerEntryCleanupAction(typename PARENT_TRAITS::element_t e)
+ {
+ delete e.Value();
+ }
+ static const bool s_DestructPerEntryCleanupAction = true;
+ };
+
+ template <typename VALUE>
+ class TypeKeyHashTraits : public DefaultSHashTraits< KeyValuePair<TypeKey*,VALUE> >
+ {
+ public:
+ // explicitly declare local typedefs for these traits types, otherwise
+ // the compiler may get confused
+ typedef typename DefaultSHashTraits< KeyValuePair<TypeKey*,VALUE> >::element_t element_t;
+ typedef typename DefaultSHashTraits< KeyValuePair<TypeKey*,VALUE> >::count_t count_t;
+ typedef TypeKey* key_t;
+
+ static key_t GetKey(element_t e)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return e.Key();
+ }
+ static BOOL Equals(key_t k1, key_t k2)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k1->Equals(k2);
+ }
+ static count_t Hash(key_t k)
+ {
+ LIMITED_METHOD_CONTRACT;
+ return k->ComputeHash();
+ }
+
+ static const element_t Null() { LIMITED_METHOD_CONTRACT; return element_t(key_t(),VALUE()); }
+ static const element_t Deleted() { LIMITED_METHOD_CONTRACT; return element_t(key_t(-1), VALUE()); }
+ static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e.Key() == key_t(); }
+ static bool IsDeleted(const element_t &e) { return e.Key() == key_t(-1); }
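+        // key_t(-1) is an otherwise-impossible TypeKey* used as the deleted-slot
+        // sentinel; key_t() (null) marks an empty slot.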
+ };
+
+ typedef MapSHash<TypeKey*, TypeInfoBase*, DeleteValuesOnDestructSHashTraits<TypeKeyHashTraits<TypeInfoBase*>>> TK_TypeInfoMap;
+ typedef TK_TypeInfoMap* PTK_TypeInfoMap;
+ typedef SetSHash< TADDR,
+ NoRemoveSHashTraits <
+ NonDacAwareSHashTraits< SetSHashTraits <TADDR> >
+ > > AddrSet;
private:
+
struct MemBuf
{
NewArrayHolder<char> MemPtr;
unsigned MemSize;
MemBuf() : MemPtr(0), MemSize(0)
{}
+ bool Resize(unsigned newSize)
+ {
+ if (newSize == 0)
+ {
+ MemPtr = nullptr;
+ MemSize = 0;
+ return true;
+ }
+ char *tmp = new (nothrow) char [newSize];
+ if (tmp == nullptr)
+ return false;
+            if (MemPtr != nullptr)
+                memmove(tmp, MemPtr.GetValue(), newSize < MemSize ? newSize : MemSize);
+ MemPtr = tmp;
+ MemSize = newSize;
+ return true;
+ }
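+        // Usage sketch (illustrative, assumes a caller-managed 'old' size):
+        //   unsigned old = buf.MemSize;
+        //   if (buf.Resize(old + extra))
+        //       memcpy(buf.MemPtr + old, src, extra);
+        // Resize keeps min(old, new) bytes and returns false on allocation failure.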
};
+ static int GetSectionIndex(const char *sectName);
static bool BuildELFHeader(MemBuf& buf);
- static bool BuildSectionNameTable(MemBuf& buf);
- static bool BuildSectionTable(MemBuf& buf);
+ static bool BuildSectionTables(MemBuf& sectBuf, MemBuf& strBuf);
static bool BuildSymbolTableSection(MemBuf& buf, PCODE addr, TADDR codeSize);
static bool BuildStringTableSection(MemBuf& strTab);
- static bool BuildDebugStrings(MemBuf& buf);
- static bool BuildDebugAbbrev(MemBuf& buf);
- static bool BuildDebugInfo(MemBuf& buf);
+ static bool BuildDebugStrings(MemBuf& buf, PTK_TypeInfoMap pTypeMap);
+ static bool BuildDebugAbbrev(MemBuf& buf);
+ static bool BuildDebugInfo(MemBuf& buf, PTK_TypeInfoMap pTypeMap, SymbolsInfo* lines, unsigned nlines);
static bool BuildDebugPub(MemBuf& buf, const char* name, uint32_t size, uint32_t dieOffset);
- static bool BuildLineTable(MemBuf& buf, PCODE startAddr, SymbolsInfo* lines, unsigned nlines);
+ static bool BuildLineTable(MemBuf& buf, PCODE startAddr, TADDR codeSize, SymbolsInfo* lines, unsigned nlines);
static bool BuildFileTable(MemBuf& buf, SymbolsInfo* lines, unsigned nlines);
- static bool BuildLineProg(MemBuf& buf, PCODE startAddr, SymbolsInfo* lines, unsigned nlines);
- static bool FitIntoSpecialOpcode(int8_t line_shift, uint8_t addr_shift);
+ static bool BuildLineProg(MemBuf& buf, PCODE startAddr, TADDR codeSize, SymbolsInfo* lines, unsigned nlines);
static void IssueSetAddress(char*& ptr, PCODE addr);
static void IssueEndOfSequence(char*& ptr);
static void IssueSimpleCommand(char*& ptr, uint8_t command);
static void IssueParamCommand(char*& ptr, uint8_t command, char* param, int param_len);
- static void IssueSpecialCommand(char*& ptr, int8_t line_shift, uint8_t addr_shift);
static void SplitPathname(const char* path, const char*& pathName, const char*& fileName);
- static int Leb128Encode(uint32_t num, char* buf, int size);
- static int Leb128Encode(int32_t num, char* buf, int size);
+ static bool CollectCalledMethods(CalledMethod* pCM, TADDR nativeCode);
#ifdef _DEBUG
static void DumpElf(const char* methodName, const MemBuf& buf);
#endif
};
+class FunctionMember: public TypeMember
+{
+public:
+ FunctionMember(MethodDesc *md, int num_locals, int num_args)
+ : TypeMember(),
+ md(md),
+ m_file(1),
+ m_line(1),
+ m_sub_low_pc(0),
+ m_sub_high_pc(0),
+ m_sub_loc(),
+ m_num_args(num_args),
+ m_num_locals(num_locals),
+ m_num_vars(num_args + num_locals),
+ m_entry_offset(0),
+ vars(new VarDebugInfo[m_num_vars]),
+ lines(NULL),
+ nlines(0),
+ m_linkage_name_offset(0),
+ dumped(false)
+ {
+ m_sub_loc[0] = 1;
+#if defined(_TARGET_AMD64_)
+ m_sub_loc[1] = DW_OP_reg6;
+#elif defined(_TARGET_ARM_)
+ m_sub_loc[1] = DW_OP_reg11;
+#else
+#error Unsupported platform!
+#endif
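+        // m_sub_loc holds a one-byte DWARF location expression, {length = 1,
+        // DW_OP_regN}: the frame base register (rbp on AMD64, r11 on ARM).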
+ }
+
+ virtual ~FunctionMember()
+ {
+ delete[] vars;
+ }
+
+ void DumpStrings(char* ptr, int& offset) override;
+ void DumpDebugInfo(char* ptr, int& offset) override;
+ void DumpTryCatchDebugInfo(char* ptr, int& offset);
+ HRESULT GetLocalsDebugInfo(NotifyGdb::PTK_TypeInfoMap pTypeMap,
+ LocalsInfo& locals,
+ int startNativeOffset);
+ BOOL IsDumped()
+ {
+ return dumped;
+ }
+
+ MethodDesc *md;
+ uint8_t m_file, m_line;
+ uintptr_t m_sub_low_pc, m_sub_high_pc;
+ uint8_t m_sub_loc[2];
+ uint8_t m_num_args;
+ uint8_t m_num_locals;
+ uint16_t m_num_vars;
+ int m_entry_offset;
+ VarDebugInfo* vars;
+ SymbolsInfo* lines;
+ unsigned nlines;
+ int m_linkage_name_offset;
+private:
+ int GetArgsAndLocalsLen();
+ void MangleName(char *buf, int &buf_offset, const char *name);
+ void DumpMangledNamespaceAndMethod(char *buf, int &offset, const char *nspace, const char *mname);
+ void DumpLinkageName(char* ptr, int& offset);
+ bool GetBlockInNativeCode(int blockILOffset, int blockILLen, TADDR *startOffset, TADDR *endOffset);
+ void DumpTryCatchBlock(char* ptr, int& offset, int ilOffset, int ilLen, int abbrev);
+ BOOL dumped;
+};
#endif // #ifndef __GDBJIT_H__
diff --git a/src/vm/gdbjithelpers.h b/src/vm/gdbjithelpers.h
index 1298141e2d..3a34dd179a 100644
--- a/src/vm/gdbjithelpers.h
+++ b/src/vm/gdbjithelpers.h
@@ -23,6 +23,8 @@ struct MethodDebugInfo
{
SequencePointInfo* points;
int size;
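+    // assumed layout: 'locals' points to 'localsSize' UTF-16 local-variable names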
+ char16_t** locals;
+ int localsSize;
};
typedef BOOL (*GetInfoForMethodDelegate)(const char*, unsigned int, MethodDebugInfo& methodDebugInfo);
diff --git a/src/vm/hash.cpp b/src/vm/hash.cpp
index 205f736b0d..6b6b21391f 100644
--- a/src/vm/hash.cpp
+++ b/src/vm/hash.cpp
@@ -547,8 +547,8 @@ UPTR HashMap::LookupValue(UPTR key, UPTR value)
// BROKEN: This is called for the RCWCache on the GC thread
// Also called by AppDomain::FindCachedAssembly to resolve AssemblyRef -- this is used by stack walking on the GC thread.
- // See comments in GCHeap::RestartEE (above the call to SyncClean::CleanUp) for reason to enter COOP mode.
- // However, if the current thread is the GC thread, we know we're not going to call GCHeap::RestartEE
+ // See comments in GCHeapUtilities::RestartEE (above the call to SyncClean::CleanUp) for reason to enter COOP mode.
+ // However, if the current thread is the GC thread, we know we're not going to call GCHeapUtilities::RestartEE
// while accessing the HashMap, so it's safe to proceed.
// (m_fAsyncMode && !IsGCThread() is the condition for entering COOP mode. I.e., enable COOP GC only if
// the HashMap is in async mode and this is not a GC thread.)
diff --git a/src/vm/i386/RedirectedHandledJITCase.asm b/src/vm/i386/RedirectedHandledJITCase.asm
index 80345623e7..44a93bd10c 100644
--- a/src/vm/i386/RedirectedHandledJITCase.asm
+++ b/src/vm/i386/RedirectedHandledJITCase.asm
@@ -103,7 +103,7 @@ _ExceptionHijack@0 PROC PUBLIC
; This is where we land when we're hijacked from an IP by the debugger.
; The debugger has already pushed the args:
; - a CONTEXT
- ; - a EXCEPTION_RECORD onto the stack
+ ; - an EXCEPTION_RECORD onto the stack
    ; - a DWORD used to multiplex the hijack
; - an arbitrary void* data parameter
call _ExceptionHijackWorker@16
diff --git a/src/vm/i386/asmconstants.h b/src/vm/i386/asmconstants.h
index 5fd39d6897..c42b167f32 100644
--- a/src/vm/i386/asmconstants.h
+++ b/src/vm/i386/asmconstants.h
@@ -449,6 +449,24 @@ ASMCONSTANTS_C_ASSERT(CallDescrData__fpReturnSize == offsetof(CallDescrD
ASMCONSTANTS_C_ASSERT(CallDescrData__pTarget == offsetof(CallDescrData, pTarget))
ASMCONSTANTS_C_ASSERT(CallDescrData__returnValue == offsetof(CallDescrData, returnValue))
+#define UMEntryThunk__m_pUMThunkMarshInfo 0x0C
+ASMCONSTANTS_C_ASSERT(UMEntryThunk__m_pUMThunkMarshInfo == offsetof(UMEntryThunk, m_pUMThunkMarshInfo))
+
+#define UMEntryThunk__m_dwDomainId 0x10
+ASMCONSTANTS_C_ASSERT(UMEntryThunk__m_dwDomainId == offsetof(UMEntryThunk, m_dwDomainId))
+
+#define UMThunkMarshInfo__m_pILStub 0x00
+ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_pILStub == offsetof(UMThunkMarshInfo, m_pILStub))
+
+#define UMThunkMarshInfo__m_cbActualArgSize 0x04
+ASMCONSTANTS_C_ASSERT(UMThunkMarshInfo__m_cbActualArgSize == offsetof(UMThunkMarshInfo, m_cbActualArgSize))
+
+#ifndef CROSSGEN_COMPILE
+#define Thread__m_pDomain 0x14
+ASMCONSTANTS_C_ASSERT(Thread__m_pDomain == offsetof(Thread, m_pDomain))
+
+#endif
+
#undef ASMCONSTANTS_C_ASSERT
#undef ASMCONSTANTS_RUNTIME_ASSERT
diff --git a/src/vm/i386/asmhelpers.S b/src/vm/i386/asmhelpers.S
new file mode 100644
index 0000000000..1c6f0a36f6
--- /dev/null
+++ b/src/vm/i386/asmhelpers.S
@@ -0,0 +1,1140 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+//
+// FramedMethodFrame prolog
+//
+.macro STUB_PROLOG
+ // push ebp-frame
+ PROLOG_BEG
+
+ // save CalleeSavedRegisters
+ PROLOG_PUSH ebx
+ PROLOG_PUSH esi
+ PROLOG_PUSH edi
+
+ // push ArgumentRegisters
+ PROLOG_PUSH ecx
+ PROLOG_PUSH edx
+
+ // set frame pointer
+ PROLOG_END
+.endm
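+//
+// Stack layout right after STUB_PROLOG (a sketch; one 4-byte slot each):
+//   [esp+0]  edx     [esp+8]  edi     [esp+16] ebx
+//   [esp+4]  ecx     [esp+12] esi     [esp+20] saved ebp
+//   [esp+24] return address, [esp+28] first stack argument
+//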
+
+//
+// FramedMethodFrame epilog
+//
+.macro STUB_EPILOG
+ // restore stack pointer
+ EPILOG_BEG
+
+ // pop ArgumentRegisters
+ EPILOG_POP edx
+ EPILOG_POP ecx
+
+ // pop CalleeSavedRegisters
+ EPILOG_POP edi
+ EPILOG_POP esi
+ EPILOG_POP ebx
+
+ // pop ebp-frame
+ EPILOG_END
+.endm
+
+//
+// FramedMethodFrame epilog for paths that return directly (no EPILOG_* markers)
+//
+.macro STUB_EPILOG_RETURN
+ // pop ArgumentRegisters
+ add esp, 8
+
+ // pop CalleeSavedRegisters
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+.endm
+
+.macro STUB_PROLOG_2_HIDDEN_ARGS
+ //
+ // The stub arguments are where we want to setup the TransitionBlock. We will
+ // setup the TransitionBlock later once we can trash them
+ //
+ // push ebp-frame
+ // push ebp
+ // mov ebp,esp
+
+ // save CalleeSavedRegisters
+ // push ebx
+
+ push esi
+ push edi
+
+ // push ArgumentRegisters
+ push ecx
+ push edx
+
+ mov ecx, [esp + 4*4]
+ mov edx, [esp + 5*4]
+
+    // Set up a proper EBP frame now that the stub arguments can be trashed
+ mov [esp + 4*4], ebx
+ mov [esp + 5*4], ebp
+ lea ebp, [esp + 5*4]
+.endm
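+//
+// On exit, ecx and edx hold the two hidden stub arguments, and their old stack
+// slots now hold the saved ebx and ebp, giving the same frame shape as
+// STUB_PROLOG above.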
+
+LEAF_ENTRY ResetCurrentContext, _TEXT
+ push eax
+
+ // clear the direction flag (used for rep instructions)
+ cld
+
+    // read the FPU control word into AX
+ fnstcw [esp - 2]
+ mov ax, [esp - 2]
+
+ fninit // reset FPU
+ and ax, 0f00h // preserve precision and rounding control
+ or ax, 007fh // mask all exceptions
+
+ // preserve precision control
+    mov [esp - 2], ax
+ fldcw [esp - 2]
+
+ pop eax
+ ret
+LEAF_END ResetCurrentContext, _TEXT
+
+// Incoming:
+// ESP+4: Pointer to buffer to which FPU state should be saved
+LEAF_ENTRY CaptureFPUContext, _TEXT
+ mov ecx, [esp + 4]
+ fnstenv [ecx]
+ ret 4
+
+LEAF_END CaptureFPUContext, _TEXT
+
+// Incoming:
+// ESP+4: Pointer to buffer from which FPU state should be restored
+LEAF_ENTRY RestoreFPUContext, _TEXT
+ mov ecx, [esp + 4]
+ fldenv [ecx]
+ ret 4
+LEAF_END RestoreFPUContext, _TEXT
+
+LEAF_ENTRY ResumeAtJitEHHelper, _TEXT
+ mov edx, [esp + 4] // edx = pContext (EHContext*)
+
+ mov ebx, [edx + EHContext_Ebx]
+ mov esi, [edx + EHContext_Esi]
+ mov edi, [edx + EHContext_Edi]
+ mov ebp, [edx + EHContext_Ebp]
+ mov ecx, [edx + EHContext_Esp]
+ mov eax, [edx + EHContext_Eip]
+ mov [ecx - 4], eax
+ mov eax, [edx + EHContext_Eax]
+ mov [ecx - 8], eax
+ mov eax, [edx + EHContext_Ecx]
+ mov [ecx - 0Ch], eax
+ mov eax, [edx + EHContext_Edx]
+ mov [ecx - 10h], eax
+ lea esp, [ecx - 10h]
+ pop edx
+ pop ecx
+ pop eax
+ ret
+LEAF_END ResumeAtJitEHHelper, _TEXT
+
+// int __stdcall CallJitEHFilterHelper(size_t *pShadowSP, EHContext *pContext);
+// on entry, only the pContext->Esp, Ebx, Esi, Edi, Ebp, and Eip are initialized
+NESTED_ENTRY CallJitEHFilterHelper, _TEXT, NoHandler
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+
+ // Write esp-4 to the shadowSP slot
+ mov eax, [ebp + 8] // pShadowSP = [ebp+8]
+ test eax, eax
+ jz LOCAL_LABEL(DONE_SHADOWSP_FILTER)
+ mov ebx, esp
+ sub ebx, 4
+ or ebx, SHADOW_SP_IN_FILTER_ASM
+ mov [eax], ebx
+
+LOCAL_LABEL(DONE_SHADOWSP_FILTER):
+ mov edx, [ebp + 12] // pContext = [ebp+12]
+ mov eax, [edx + EHContext_Eax]
+ mov ebx, [edx + EHContext_Ebx]
+ mov esi, [edx + EHContext_Esi]
+ mov edi, [edx + EHContext_Edi]
+ mov ebp, [edx + EHContext_Ebp]
+
+ call DWORD PTR [edx + EHContext_Eip]
+#ifdef _DEBUG
+ nop // Indicate that it is OK to call managed code directly from here
+#endif // _DEBUG
+
+ pop edi
+ pop esi
+ pop ebx
+    pop ebp // don't use 'leave' here, as ebp has been trashed
+ ret 8
+NESTED_END CallJitEHFilterHelper, _TEXT
+
+// void __stdcall CallJITEHFinallyHelper(size_t *pShadowSP, EHContext *pContext);
+// on entry, only the pContext->Esp, Ebx, Esi, Edi, Ebp, and Eip are initialized
+NESTED_ENTRY CallJitEHFinallyHelper, _TEXT, NoHandler
+ push ebp
+ mov ebp, esp
+ push ebx
+ push esi
+ push edi
+
+ // Write esp-4 to the shadowSP slot
+ mov eax, [ebp + 8] // pShadowSP = [ebp+8]
+ test eax, eax
+ jz LOCAL_LABEL(DONE_SHADOWSP_FINALLY)
+ mov ebx, esp
+ sub ebx, 4
+ mov [eax], ebx
+
+LOCAL_LABEL(DONE_SHADOWSP_FINALLY):
+ mov edx, [ebp + 12] // pContext = [ebp+12]
+ mov eax, [edx + EHContext_Eax]
+ mov ebx, [edx + EHContext_Ebx]
+ mov esi, [edx + EHContext_Esi]
+ mov edi, [edx + EHContext_Edi]
+ mov ebp, [edx + EHContext_Ebp]
+ call DWORD PTR [edx + EHContext_Eip]
+#ifdef _DEBUG
+ nop // Indicate that it is OK to call managed code directly from here
+#endif // _DEBUG
+
+ // Reflect the changes to the context and only update non-volatile registers.
+ // This will be used later to update REGDISPLAY
+ mov edx, [esp + 12 + 12]
+ mov [edx + EHContext_Ebx], ebx
+ mov [edx + EHContext_Esi], esi
+ mov [edx + EHContext_Edi], edi
+ mov [edx + EHContext_Ebp], ebp
+
+ pop edi
+ pop esi
+ pop ebx
+    pop ebp // don't use 'leave' here, as ebp has been trashed
+ ret 8
+NESTED_END CallJitEHFinallyHelper, _TEXT
+
+LEAF_ENTRY GetSpecificCpuTypeAsm, _TEXT
+ push ebx // ebx is trashed by the cpuid calls
+
+ // See if the chip supports CPUID
+ pushfd
+ pop ecx // Get the EFLAGS
+ mov eax, ecx // Save for later testing
+ xor ecx, 200000h // Invert the ID bit
+ push ecx
+ popfd // Save the updated flags
+ pushfd
+ pop ecx // Retrieve the updated flags
+ xor ecx, eax // Test if it actually changed (bit set means yes)
+ push eax
+ popfd // Restore the flags
+
+ test ecx, 200000h
+ jz LOCAL_LABEL(Assume486)
+
+ xor eax, eax
+ cpuid
+
+ test eax, eax
+    jz LOCAL_LABEL(Assume486) // br if CPUID1 not allowed
+
+ mov eax, 1
+ cpuid
+
+ // filter out everything except family and model
+ // Note that some multi-procs have different stepping number for each proc
+ and eax, 0ff0h
+
+ jmp LOCAL_LABEL(CpuTypeDone)
+
+LOCAL_LABEL(Assume486):
+ mov eax, 0400h // report 486
+
+LOCAL_LABEL(CpuTypeDone):
+ pop ebx
+ ret
+LEAF_END GetSpecificCpuTypeAsm, _TEXT
+
+// DWORD __stdcall GetSpecificCpuFeaturesAsm(DWORD *pInfo);
+LEAF_ENTRY GetSpecificCpuFeaturesAsm, _TEXT
+ push ebx // ebx is trashed by the cpuid calls
+
+ // See if the chip supports CPUID
+ pushfd
+ pop ecx // Get the EFLAGS
+ mov eax, ecx // Save for later testing
+ xor ecx, 200000h // Invert the ID bit.
+ push ecx
+ popfd // Save the updated flags.
+ pushfd
+ pop ecx // Retrieve the updated flags
+ xor ecx, eax // Test if it actually changed (bit set means yes)
+ push eax
+ popfd // Restore the flags
+
+ test ecx, 200000h
+ jz LOCAL_LABEL(CpuFeaturesFail)
+
+ xor eax, eax
+ cpuid
+
+ test eax, eax
+ jz LOCAL_LABEL(CpuFeaturesDone) // br if CPUID1 not allowed
+
+ mov eax, 1
+ cpuid
+ mov eax, edx // return all feature flags
+ mov edx, [esp + 8]
+ test edx, edx
+ jz LOCAL_LABEL(CpuFeaturesDone)
+ mov [edx],ebx // return additional useful information
+ jmp LOCAL_LABEL(CpuFeaturesDone)
+
+LOCAL_LABEL(CpuFeaturesFail):
+ xor eax, eax // Nothing to report
+
+LOCAL_LABEL(CpuFeaturesDone):
+ pop ebx
+ ret 4
+LEAF_END GetSpecificCpuFeaturesAsm, _TEXT
+
+
+// -----------------------------------------------------------------------
+// The out-of-line portion of the code to enable preemptive GC.
+// After the work is done, the code jumps back to the "pRejoinPoint"
+// which should be emitted right after the inline part is generated.
+//
+// Assumptions:
+// ebx = Thread
+// Preserves
+// all registers except ecx.
+//
+// -----------------------------------------------------------------------
+NESTED_ENTRY StubRareEnable, _TEXT, NoHandler
+ push eax
+ push edx
+
+ push ebx
+ call C_FUNC(StubRareEnableWorker)
+
+ pop edx
+ pop eax
+ ret
+NESTED_END StubRareEnable, _TEXT
+
+NESTED_ENTRY StubRareDisableTHROW, _TEXT, NoHandler
+ push eax
+ push edx
+
+ push ebx // Thread
+ call C_FUNC(StubRareDisableTHROWWorker)
+
+ pop edx
+ pop eax
+ ret
+NESTED_END StubRareDisableTHROW, _TEXT
+
+LEAF_ENTRY InternalExceptionWorker, _TEXT
+ pop edx // recover RETADDR
+ add esp, eax // release caller's args
+ push edx // restore RETADDR
+ jmp C_FUNC(JIT_InternalThrow)
+LEAF_END InternalExceptionWorker, _TEXT
+
+// EAX -> number of caller arg bytes on the stack that we must remove before going
+// to the throw helper, which assumes the stack is clean.
+LEAF_ENTRY ArrayOpStubNullException, _TEXT
+ // kFactorReg and kTotalReg could not have been modified, but let's pop
+ // them anyway for consistency and to avoid future bugs.
+ pop esi
+ pop edi
+ mov ecx, CORINFO_NullReferenceException_ASM
+ jmp C_FUNC(InternalExceptionWorker)
+LEAF_END ArrayOpStubNullException, _TEXT
+
+// EAX -> number of caller arg bytes on the stack that we must remove before going
+// to the throw helper, which assumes the stack is clean.
+LEAF_ENTRY ArrayOpStubRangeException, _TEXT
+ // kFactorReg and kTotalReg could not have been modified, but let's pop
+ // them anyway for consistency and to avoid future bugs.
+ pop esi
+ pop edi
+ mov ecx, CORINFO_IndexOutOfRangeException_ASM
+ jmp C_FUNC(InternalExceptionWorker)
+LEAF_END ArrayOpStubRangeException, _TEXT
+
+// EAX -> number of caller arg bytes on the stack that we must remove before going
+// to the throw helper, which assumes the stack is clean.
+LEAF_ENTRY ArrayOpStubTypeMismatchException, _TEXT
+ // kFactorReg and kTotalReg could not have been modified, but let's pop
+ // them anyway for consistency and to avoid future bugs.
+ pop esi
+ pop edi
+ mov ecx, CORINFO_ArrayTypeMismatchException_ASM
+ jmp C_FUNC(InternalExceptionWorker)
+LEAF_END ArrayOpStubTypeMismatchException, _TEXT
+
+// ------------------------------------------------------------------------------
+// This helper routine enregisters the appropriate arguments and makes the
+// actual call.
+// ------------------------------------------------------------------------------
+// void STDCALL CallDescrWorkerInternal(CallDescrWorkerParams * pParams)
+NESTED_ENTRY CallDescrWorkerInternal, _TEXT, NoHandler
+ PROLOG_BEG
+ PROLOG_PUSH ebx
+ PROLOG_END
+
+    mov ebx, [esp + ((2 + 1) * 4)] // ebx <- pParams
+
+ // compute padding size
+ mov eax, esp
+ mov ecx, [ebx + CallDescrData__numStackSlots]
+ shl ecx, 2
+ sub eax, ecx
+ and eax, 15
+ // adjust stack offset
+ sub esp, eax
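+    // esp is now positioned so that after the numStackSlots pushes below the
+    // stack is 16-byte aligned at the call site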
+
+ // copy the stack
+    mov ecx, [ebx + CallDescrData__numStackSlots]
+    mov eax, [ebx + CallDescrData__pSrc]
+ test ecx, ecx
+ jz LOCAL_LABEL(donestack)
+ lea eax, [eax + 4*ecx - 4] // last argument
+ push DWORD PTR [eax]
+ dec ecx
+ jz LOCAL_LABEL(donestack)
+ sub eax, 4
+ push DWORD PTR [eax]
+ dec ecx
+ jz LOCAL_LABEL(donestack)
+
+LOCAL_LABEL(stackloop):
+ sub eax, 4
+ push DWORD PTR [eax]
+ dec ecx
+ jnz LOCAL_LABEL(stackloop)
+
+LOCAL_LABEL(donestack):
+ // now we must push each field of the ArgumentRegister structure
+ mov eax, [ebx + CallDescrData__pArgumentRegisters]
+ mov edx, DWORD PTR [eax]
+ mov ecx, DWORD PTR [eax + 4]
+
+ CHECK_STACK_ALIGNMENT
+ call [ebx + CallDescrData__pTarget]
+#ifdef _DEBUG
+ nop // This is a tag that we use in an assert. Fcalls expect to
+ // be called from Jitted code or from certain blessed call sites like
+ // this one. (See HelperMethodFrame::InsureInit)
+#endif
+
+ // Save FP return value if necessary
+ mov ecx, [ebx + CallDescrData__fpReturnSize]
+ cmp ecx, 0
+ je LOCAL_LABEL(ReturnsInt)
+
+ cmp ecx, 4
+ je LOCAL_LABEL(ReturnsFloat)
+ cmp ecx, 8
+ je LOCAL_LABEL(ReturnsDouble)
+ // unexpected
+ jmp LOCAL_LABEL(Epilog)
+
+LOCAL_LABEL(ReturnsInt):
+ mov [ebx + CallDescrData__returnValue], eax
+ mov [ebx + CallDescrData__returnValue + 4], edx
+
+LOCAL_LABEL(Epilog):
+    // restore the stack pointer
+ lea esp, [ebp - 4]
+
+ EPILOG_BEG
+ EPILOG_POP ebx
+ EPILOG_END
+ ret 4
+
+LOCAL_LABEL(ReturnsFloat):
+ fstp DWORD PTR [ebx + CallDescrData__returnValue] // Spill the Float return value
+ jmp LOCAL_LABEL(Epilog)
+
+LOCAL_LABEL(ReturnsDouble):
+ fstp QWORD PTR [ebx + CallDescrData__returnValue] // Spill the Double return value
+ jmp LOCAL_LABEL(Epilog)
+NESTED_END CallDescrWorkerInternal, _TEXT
+
+#ifdef _DEBUG
+// int __fastcall HelperMethodFrameRestoreState(HelperMethodFrame*, struct MachState *)
+LEAF_ENTRY HelperMethodFrameRestoreState, _TEXT
+ mov eax, edx // eax = MachState*
+#else // _DEBUG
+// int __fastcall HelperMethodFrameRestoreState(struct MachState *)
+LEAF_ENTRY HelperMethodFrameRestoreState, _TEXT
+ mov eax, ecx // eax = MachState*
+#endif // _DEBUG
+    // restore the registers from the m_MachState structure. Note that
+    // we only do this for registers that were not saved on the stack
+    // at the time the machine state snapshot was taken.
+
+ cmp dword ptr [eax+MachState__pRetAddr], 0
+
+#ifdef _DEBUG
+ jnz LOCAL_LABEL(noConfirm)
+ push ebp
+ push ebx
+ push edi
+ push esi
+ push ecx // HelperFrame*
+ call C_FUNC(HelperMethodFrameConfirmState)
+ // on return, eax = MachState*
+ cmp DWORD PTR [eax + MachState__pRetAddr], 0
+LOCAL_LABEL(noConfirm):
+#endif // _DEBUG
+
+ jz LOCAL_LABEL(doRet)
+
+ lea edx, [eax + MachState__esi] // Did we have to spill ESI
+ cmp [eax + MachState__pEsi], edx
+ jnz LOCAL_LABEL(SkipESI)
+ mov esi, [edx] // Then restore it
+
+LOCAL_LABEL(SkipESI):
+ lea edx, [eax + MachState__edi] // Did we have to spill EDI
+ cmp [eax + MachState__pEdi], edx
+ jnz LOCAL_LABEL(SkipEDI)
+ mov edi, [edx] // Then restore it
+
+LOCAL_LABEL(SkipEDI):
+ lea edx, [eax + MachState__ebx] // Did we have to spill EBX
+ cmp [eax + MachState__pEbx], edx
+ jnz LOCAL_LABEL(SkipEBX)
+ mov ebx, [edx] // Then restore it
+
+LOCAL_LABEL(SkipEBX):
+ lea edx, [eax + MachState__ebp] // Did we have to spill EBP
+ cmp [eax + MachState__pEbp], edx
+ jnz LOCAL_LABEL(SkipEBP)
+ mov ebp, [edx] // Then restore it
+
+LOCAL_LABEL(SkipEBP):
+LOCAL_LABEL(doRet):
+ xor eax, eax
+ ret
+LEAF_END HelperMethodFrameRestoreState, _TEXT
+
+#ifdef FEATURE_HIJACK
+
+// A JITted method's return address was hijacked to return to us here.
+// VOID OnHijackTripThread()
+NESTED_ENTRY OnHijackTripThread, _TEXT, NoHandler
+ // Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ // and HijackArgs
+ push eax // make room for the real return address (Eip)
+ push ebp
+ push eax
+ push ecx
+ push edx
+ push ebx
+ push esi
+ push edi
+
+ // unused space for floating point state
+ sub esp,12
+
+ push esp
+ call C_FUNC(OnHijackWorker)
+
+ // unused space for floating point state
+ add esp,12
+
+ pop edi
+ pop esi
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+ pop ebp
+ ret // return to the correct place, adjusted by our caller
+NESTED_END OnHijackTripThread, _TEXT
+
+// VOID OnHijackFPTripThread()
+NESTED_ENTRY OnHijackFPTripThread, _TEXT, NoHandler
+ // Don't fiddle with this unless you change HijackFrame::UpdateRegDisplay
+ // and HijackArgs
+ push eax // make room for the real return address (Eip)
+ push ebp
+ push eax
+ push ecx
+ push edx
+ push ebx
+ push esi
+ push edi
+
+ sub esp,12
+
+    // save the top of the floating point stack (the FP return value is passed in it);
+    // only 8 bytes (QWORD) are stored here, so full 80-bit x87 precision is not kept
+ fstp QWORD PTR [esp]
+
+ push esp
+ call C_FUNC(OnHijackWorker)
+
+ // restore top of the floating point stack
+ fld QWORD PTR [esp]
+
+ add esp,12
+
+ pop edi
+ pop esi
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+ pop ebp
+ ret // return to the correct place, adjusted by our caller
+NESTED_END OnHijackFPTripThread, _TEXT
+
+#endif // FEATURE_HIJACK
+
+// ==========================================================================
+// This function is reached only via the embedded ImportThunkGlue code inside
+// an NDirectMethodDesc. Its purpose is to load the DLL associated with an
+// N/Direct method, then backpatch the DLL target into the methoddesc.
+//
+// Initial state:
+//
+// Preemptive GC is *enabled*: we are actually in an unmanaged state.
+//
+//
+// [esp+...] - The *unmanaged* parameters to the DLL target.
+// [esp+4] - Return address back into the JIT'ted code that made
+// the DLL call.
+// [esp] - Contains the "return address." Because we got here
+//              through a call embedded inside an MD, this "return address"
+//              gives us an easy way to find the MD (which was the
+//              whole purpose of the embedded call maneuver.)
+//
+//
+//
+// ==========================================================================
+LEAF_ENTRY NDirectImportThunk, _TEXT
+ // Preserve argument registers
+ push ecx
+ push edx
+
+ // Invoke the function that does the real work.
+ push eax
+ call C_FUNC(NDirectImportWorker)
+
+ // Restore argument registers
+ pop edx
+ pop ecx
+
+ // If we got back from NDirectImportWorker, the MD has been successfully
+ // linked and "eax" contains the DLL target. Proceed to execute the
+ // original DLL call.
+ jmp eax // Jump to DLL target
+LEAF_END NDirectImportThunk, _TEXT
+
+// ==========================================================================
+// The call in fixup precode initially points to this function.
+// The purpose of this function is to load the MethodDesc and forward the call to the prestub.
+LEAF_ENTRY PrecodeFixupThunk, _TEXT
+ // Pop the return address. It points right after the call instruction in the precode.
+ pop eax
+ push esi
+ push edi
+
+ // Inline computation done by FixupPrecode::GetMethodDesc()
+ movzx esi, BYTE PTR [eax + 2] // m_PrecodeChunkIndex
+ movzx edi, BYTE PTR [eax + 1] // m_MethodDescChunkIndex
+ mov eax, DWORD PTR [eax + esi*8 +3]
+ lea eax, [eax + edi*4]
+
+ pop edi
+ pop esi
+ jmp C_FUNC(ThePreStub)
+LEAF_END PrecodeFixupThunk, _TEXT
+
+// void __stdcall UM2MThunk_WrapperHelper(void *pThunkArgs,
+// int argLen,
+// void *pAddr,
+// UMEntryThunk *pEntryThunk,
+// Thread *pThread)
+NESTED_ENTRY UM2MThunk_WrapperHelper, _TEXT, NoHandler
+ push ebx
+
+ mov eax, [esp + 20] // pEntryThunk
+ mov ecx, [esp + 24] // pThread
+ mov ebx, [esp + 8] // pThunkArgs
+ call [esp + 16] // pAddr
+
+ pop ebx
+
+ ret 20
+NESTED_END UM2MThunk_WrapperHelper, _TEXT
+
+NESTED_ENTRY UMThunkStubRareDisable, _TEXT, NoHandler
+ push eax
+ push ecx
+
+ push eax // Push the UMEntryThunk
+ push ecx // Push thread
+ call C_FUNC(UMThunkStubRareDisableWorker)
+
+ pop ecx
+ pop eax
+ ret
+NESTED_END UMThunkStubRareDisable, _TEXT
+
+//
+// Used to get the current instruction pointer value
+//
+// UINT_PTR __stdcall GetCurrentIP(void);
+LEAF_ENTRY GetCurrentIP, _TEXT
+ mov eax, [esp]
+ ret
+LEAF_END GetCurrentIP, _TEXT
+
+// LPVOID __stdcall GetCurrentSP(void);
+LEAF_ENTRY GetCurrentSP, _TEXT
+ mov eax, esp
+ ret
+LEAF_END GetCurrentSP, _TEXT
+
+// ==========================================================================
+// Invoked for vararg forward P/Invoke calls as a stub.
+// Except for secret return buffer, arguments come on the stack so EDX is available as scratch.
+// EAX - the NDirectMethodDesc
+// ECX - may be return buffer address
+// [ESP + 4] - the VASigCookie
+//
+NESTED_ENTRY VarargPInvokeStub, _TEXT, NoHandler
+ // EDX <- VASigCookie
+ mov edx, [esp + 4] // skip retaddr
+
+ mov edx, [edx + VASigCookie__StubOffset]
+ test edx, edx
+
+ jz LOCAL_LABEL(GoCallVarargWorker)
+ // ---------------------------------------
+
+ // EAX contains MD ptr for the IL stub
+ jmp edx
+
+LOCAL_LABEL(GoCallVarargWorker):
+ //
+ // MD ptr in EAX, VASigCookie ptr at [esp+4]
+ //
+ STUB_PROLOG
+
+ mov esi, esp
+
+ // save pMD
+ push eax
+
+ push eax // pMD
+ push dword ptr [esi + 4*7] // pVaSigCookie
+ push esi // pTransitionBlock
+
+ call C_FUNC(VarargPInvokeStubWorker)
+
+ // restore pMD
+ pop eax
+
+ STUB_EPILOG
+
+ // jump back to the helper - this time it won't come back here as the stub already exists
+ jmp C_FUNC(VarargPInvokeStub)
+NESTED_END VarargPInvokeStub, _TEXT
+
+// ==========================================================================
+// Invoked for marshaling-required unmanaged CALLI calls as a stub.
+// EAX - the unmanaged target
+// ECX, EDX - arguments
+// [ESP + 4] - the VASigCookie
+//
+LEAF_ENTRY GenericPInvokeCalliHelper, _TEXT
+ // save the target
+ push eax
+
+ // EAX <- VASigCookie
+ mov eax, [esp + 8] // skip target and retaddr
+
+ mov eax, [eax + VASigCookie__StubOffset]
+ test eax, eax
+
+ jz LOCAL_LABEL(GoCallCalliWorker)
+ // ---------------------------------------
+
+ push eax
+
+ // stack layout at this point:
+ //
+ // | ... |
+ // | stack arguments | ESP + 16
+ // +----------------------+
+ // | VASigCookie* | ESP + 12
+ // +----------------------+
+ // | return address | ESP + 8
+ // +----------------------+
+ // | CALLI target address | ESP + 4
+ // +----------------------+
+ // | stub entry point | ESP + 0
+ // ------------------------
+
+ // remove VASigCookie from the stack
+ mov eax, [esp + 8]
+ mov [esp + 12], eax
+
+ // move stub entry point below the RA
+ mov eax, [esp]
+ mov [esp + 8], eax
+
+ // load EAX with the target address
+ pop eax
+ pop eax
+
+ // stack layout at this point:
+ //
+ // | ... |
+ // | stack arguments | ESP + 8
+ // +----------------------+
+ // | return address | ESP + 4
+ // +----------------------+
+ // | stub entry point | ESP + 0
+ // ------------------------
+
+ // CALLI target address is in EAX
+ ret
+
+LOCAL_LABEL(GoCallCalliWorker):
+ // the target is on the stack and will become m_Datum of PInvokeCalliFrame
+ // call the stub generating worker
+ pop eax
+
+ //
+ // target ptr in EAX, VASigCookie ptr in EDX
+ //
+
+ STUB_PROLOG
+
+ mov esi, esp
+
+ // save target
+ push eax
+
+ push eax // unmanaged target
+ push dword ptr [esi + 4*7] // pVaSigCookie (first stack argument)
+ push esi // pTransitionBlock
+
+ call C_FUNC(GenericPInvokeCalliStubWorker)
+
+ // restore target
+ pop eax
+
+ STUB_EPILOG
+
+ // jump back to the helper - this time it won't come back here as the stub already exists
+ jmp C_FUNC(GenericPInvokeCalliHelper)
+LEAF_END GenericPInvokeCalliHelper, _TEXT
+
+#ifdef FEATURE_PREJIT
+
+// =========================================================================
+NESTED_ENTRY StubDispatchFixupStub, _TEXT, NoHandler
+ STUB_PROLOG
+
+ mov esi, esp
+
+ push 0
+ push 0
+
+ push eax // siteAddrForRegisterIndirect (for tailcalls)
+ push esi // pTransitionBlock
+
+ call C_FUNC(StubDispatchFixupWorker)
+
+ STUB_EPILOG
+
+PATCH_LABEL StubDispatchFixupPatchLabel
+ // Tailcall target
+ jmp eax
+
+ // This will never be executed. It is just to help out stack-walking logic
+ // which disassembles the epilog to unwind the stack.
+ ret
+NESTED_END StubDispatchFixupStub, _TEXT
+
+// ==========================================================================
+NESTED_ENTRY ExternalMethodFixupStub, _TEXT, NoHandler
+ // pop off the return address to the stub
+ // leaving the actual caller's return address on top of the stack
+ pop eax
+
+ STUB_PROLOG
+
+ mov esi, esp
+
+ // EAX is return address into CORCOMPILE_EXTERNAL_METHOD_THUNK. Subtract 5 to get start address.
+ sub eax, 5
+
+ push 0
+ push 0
+
+ push eax
+
+ // pTransitionBlock
+ push esi
+
+ call C_FUNC(ExternalMethodFixupWorker)
+
+ // eax now contains replacement stub. PreStubWorker will never return
+ // NULL (it throws an exception if stub creation fails.)
+
+ // From here on, mustn't trash eax
+
+ STUB_EPILOG
+
+PATCH_LABEL ExternalMethodFixupPatchLabel
+ // Tailcall target
+ jmp eax
+
+ // This will never be executed. It is just to help out stack-walking logic
+ // which disassembles the epilog to unwind the stack.
+ ret
+NESTED_END ExternalMethodFixupStub, _TEXT
+
+#ifdef FEATURE_READYTORUN
+// ==========================================================================
+NESTED_ENTRY DelayLoad_MethodCall, _TEXT, NoHandler
+ STUB_PROLOG_2_HIDDEN_ARGS
+
+ mov esi, esp
+
+ push ecx
+ push edx
+
+ push eax
+
+ // pTransitionBlock
+ push esi
+
+ call C_FUNC(ExternalMethodFixupWorker)
+
+ // eax now contains replacement stub. PreStubWorker will never return
+ // NULL (it throws an exception if stub creation fails.)
+
+ // From here on, mustn't trash eax
+
+ STUB_EPILOG
+
+ // Share the patch label
+ jmp C_FUNC(ExternalMethodFixupPatchLabel)
+
+ // This will never be executed. It is just to help out stack-walking logic
+ // which disassembles the epilog to unwind the stack.
+ ret
+NESTED_END DelayLoad_MethodCall, _TEXT
+
+#endif // FEATURE_READYTORUN
+
+// =======================================================================================
+// The call in softbound vtable slots initially points to this function.
+// The pupose of this function is to transfer the control to right target and
+// to optionally patch the target of the jump so that we do not take this slow path again.
+//
+NESTED_ENTRY VirtualMethodFixupStub, _TEXT, NoHandler
+ // Pop the return address. It points right after the call instruction in the thunk.
+ pop eax
+ // Calculate the address of the thunk
+ sub eax, 5
+
+ // Push ebp frame to get good callstack under debugger
+ PROLOG_BEG
+
+ // Preserve argument registers
+ PROLOG_PUSH ecx
+ PROLOG_PUSH edx
+
+ // Set frame pointer
+ PROLOG_END
+
+ push eax // address of the thunk
+ push ecx // this ptr
+ call C_FUNC(VirtualMethodFixupWorker)
+
+ // Restore stack pointer
+ EPILOG_BEG
+
+ // Restore argument registers
+ EPILOG_POP edx
+ EPILOG_POP ecx
+
+ // Pop ebp frame
+ EPILOG_END
+
+PATCH_LABEL VirtualMethodFixupPatchLabel
+ // Proceed to execute the actual method.
+ jmp eax
+
+ // This will never be executed. It is just to help out stack-walking logic
+ // which disassembles the epilog to unwind the stack.
+ ret
+NESTED_END VirtualMethodFixupStub, _TEXT
+
+#endif // FEATURE_PREJIT
+
+NESTED_ENTRY ThePreStub, _TEXT, NoHandler
+ STUB_PROLOG
+
+ mov esi, esp
+
+ // Compute padding size
+ lea ebx, [esp - 8]
+ and ebx, 15
+ // Adjust stack offset
+ sub esp, ebx
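+    // esp - 8 is now 16-byte aligned, so the two pushes below leave the
+    // stack aligned for the call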
+
+ // EAX contains MethodDesc* from the precode. Push it here as argument
+ // for PreStubWorker
+ push eax
+
+ push esi
+
+ CHECK_STACK_ALIGNMENT
+ call C_FUNC(PreStubWorker)
+
+ // eax now contains replacement stub. PreStubWorker will never return
+ // NULL (it throws an exception if stub creation fails.)
+
+ // From here on, mustn't trash eax
+
+ // Restore stack pointer
+ mov esp, esi
+
+ STUB_EPILOG
+
+ // Tailcall target
+ jmp eax
+
+ // This will never be executed. It is just to help out stack-walking logic
+ // which disassembles the epilog to unwind the stack.
+ ret
+NESTED_END ThePreStub, _TEXT
+
+// This method does nothing. It's just a fixed function for the debugger to put a breakpoint
+// on so that it can trace a call target.
+LEAF_ENTRY ThePreStubPatch, _TEXT
+ // make sure that the basic block is unique
+ test eax,34
+
+PATCH_LABEL ThePreStubPatchLabel
+ ret
+LEAF_END ThePreStubPatch, _TEXT
+
+#ifdef FEATURE_READYTORUN
+// ==========================================================================
+// Define helpers for delay loading of readytorun helpers
+
+.macro DYNAMICHELPER frameFlags, suffix
+
+NESTED_ENTRY DelayLoad_Helper\suffix, _TEXT, NoHandler
+ STUB_PROLOG_2_HIDDEN_ARGS
+
+ mov esi, esp
+
+ push \frameFlags
+ push ecx // module
+ push edx // section index
+
+ push eax // indirection cell address.
+ push esi // pTransitionBlock
+
+ call C_FUNC(DynamicHelperWorker)
+ test eax,eax
+ jnz LOCAL_LABEL(TailCallDelayLoad_Helper\suffix)
+
+ mov eax, [esi] // The result is stored in the argument area of the transition block
+ STUB_EPILOG_RETURN
+ ret
+
+LOCAL_LABEL(TailCallDelayLoad_Helper\suffix):
+ STUB_EPILOG
+ jmp eax
+NESTED_END DelayLoad_Helper\suffix, _TEXT
+.endm
+
+DYNAMICHELPER DynamicHelperFrameFlags_Default
+DYNAMICHELPER DynamicHelperFrameFlags_ObjectArg, _Obj
+DYNAMICHELPER (DynamicHelperFrameFlags_ObjectArg | DynamicHelperFrameFlags_ObjectArg2), _ObjObj
+
+#endif // FEATURE_READYTORUN
+
+NESTED_ENTRY ResolveWorkerAsmStub, _TEXT, NoHandler
+ //
+ // The stub arguments are where we want to setup the TransitionBlock. We will
+ // setup the TransitionBlock later once we can trash them
+ //
+ // push ebp-frame
+ // push ebp
+ // mov ebp,esp
+
+ // save CalleeSavedRegisters
+ // push ebx
+
+ push esi
+ push edi
+
+ // push ArgumentRegisters
+ push ecx
+ push edx
+
+ mov esi, esp
+
+ push [esi + 4*4] // dispatch token
+ push [esi + 5*4] // siteAddrForRegisterIndirect
+ push esi // pTransitionBlock
+
+    // Set up a proper EBP frame now that the stub arguments can be trashed
+ mov [esi + 4*4],ebx
+ mov [esi + 5*4],ebp
+ lea ebp, [esi + 5*4]
+
+ // Make the call
+ call C_FUNC(VSD_ResolveWorker)
+
+ // From here on, mustn't trash eax
+
+ // pop ArgumentRegisters
+ pop edx
+ pop ecx
+
+ // pop CalleeSavedRegisters
+ pop edi
+ pop esi
+ pop ebx
+ pop ebp
+
+ // Now jump to the target
+ jmp eax // continue on into the method
+NESTED_END ResolveWorkerAsmStub, _TEXT
diff --git a/src/vm/i386/asmhelpers.asm b/src/vm/i386/asmhelpers.asm
index 66a22b7962..0456be82db 100644
--- a/src/vm/i386/asmhelpers.asm
+++ b/src/vm/i386/asmhelpers.asm
@@ -43,7 +43,6 @@ TlsGetValue PROTO stdcall
ifdef FEATURE_HIJACK
EXTERN _OnHijackWorker@4:PROC
endif ;FEATURE_HIJACK
-EXTERN _COMPlusEndCatch@20:PROC
EXTERN _COMPlusFrameHandler:PROC
ifdef FEATURE_COMINTEROP
EXTERN _COMPlusFrameHandlerRevCom:PROC
@@ -1005,33 +1004,6 @@ OnHijackFPTripThread ENDP
endif ; FEATURE_HIJACK
-; Note that the debugger skips this entirely when doing SetIP,
-; since COMPlusCheckForAbort should always return 0. Excep.cpp:LeaveCatch
-; asserts that to be true. If this ends up doing more work, then the
-; debugger may need additional support.
-; void __stdcall JIT_EndCatch();
-JIT_EndCatch PROC stdcall public
-
- ; make temp storage for return address, and push the address of that
- ; as the last arg to COMPlusEndCatch
- mov ecx, [esp]
- push ecx;
- push esp;
-
- ; push the rest of COMPlusEndCatch's args, right-to-left
- push esi
- push edi
- push ebx
- push ebp
-
- call _COMPlusEndCatch@20 ; returns old esp value in eax, stores jump address
- ; now eax = new esp, [esp] = new eip
-
- pop edx ; edx = new eip
- mov esp, eax ; esp = new esp
- jmp edx ; eip = new eip
-
-JIT_EndCatch ENDP
;==========================================================================
; This function is reached only via the embedded ImportThunkGlue code inside
diff --git a/src/vm/i386/cgencpu.h b/src/vm/i386/cgencpu.h
index 2da98821bc..99f4eb498f 100644
--- a/src/vm/i386/cgencpu.h
+++ b/src/vm/i386/cgencpu.h
@@ -43,6 +43,10 @@ EXTERN_C void STDCALL PInvokeStackImbalanceHelper(void);
EXTERN_C void STDCALL CopyCtorCallStub(void);
#endif // !FEATURE_CORECLR
+#ifdef FEATURE_STUBS_AS_IL
+EXTERN_C void SinglecastDelegateInvokeStub();
+#endif // FEATURE_STUBS_AS_IL
+
BOOL Runtime_Test_For_SSE2();
#ifdef CROSSGEN_COMPILE
@@ -476,7 +480,7 @@ inline BOOL IsUnmanagedValueTypeReturnedByRef(UINT sizeofvaluetype)
}
#include <pshpack1.h>
-DECLSPEC_ALIGN(4) struct UMEntryThunkCode
+struct DECLSPEC_ALIGN(4) UMEntryThunkCode
{
    BYTE m_alignpad[2]; // used to guarantee alignment of backpatched portion
BYTE m_movEAX; //MOV EAX,imm32
@@ -562,6 +566,7 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
// #define JIT_GetSharedGCStaticBaseNoCtor
// #define JIT_GetSharedNonGCStaticBaseNoCtor
+#ifndef FEATURE_PAL
#define JIT_ChkCastClass JIT_ChkCastClass
#define JIT_ChkCastClassSpecial JIT_ChkCastClassSpecial
#define JIT_IsInstanceOfClass JIT_IsInstanceOfClass
@@ -569,5 +574,5 @@ inline BOOL ClrFlushInstructionCache(LPCVOID pCodeAddr, size_t sizeOfCode)
#define JIT_IsInstanceOfInterface JIT_IsInstanceOfInterface
#define JIT_NewCrossContext JIT_NewCrossContext
#define JIT_Stelem_Ref JIT_Stelem_Ref
-
+#endif // FEATURE_PAL
#endif // __cgenx86_h__
diff --git a/src/vm/i386/cgenx86.cpp b/src/vm/i386/cgenx86.cpp
index ff2f2df5a3..08ccd01086 100644
--- a/src/vm/i386/cgenx86.cpp
+++ b/src/vm/i386/cgenx86.cpp
@@ -760,6 +760,7 @@ WORD GetUnpatchedCodeData(LPCBYTE pAddr)
#ifndef DACCESS_COMPILE
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
//-------------------------------------------------------------------------
// One-time creation of special prestub to initialize UMEntryThunks.
//-------------------------------------------------------------------------
@@ -809,6 +810,7 @@ Stub *GenerateUMThunkPrestub()
RETURN psl->Link(SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap());
}
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
Stub *GenerateInitPInvokeFrameHelper()
{
@@ -1593,6 +1595,7 @@ extern "C" VOID STDCALL StubRareDisableTHROWWorker(Thread *pThread)
pThread->HandleThreadAbort();
}
+#ifndef FEATURE_PAL
// Note that this logic is copied below, in PopSEHRecords
__declspec(naked)
VOID __cdecl PopSEHRecords(LPVOID pTargetSP)
@@ -1614,6 +1617,7 @@ VOID __cdecl PopSEHRecords(LPVOID pTargetSP)
retn
}
}
+#endif // FEATURE_PAL
//////////////////////////////////////////////////////////////////////////////
//
@@ -1680,9 +1684,10 @@ void ResumeAtJit(PCONTEXT pContext, LPVOID oldESP)
#endif // !EnC_SUPPORTED
+#ifndef FEATURE_PAL
#pragma warning(push)
#pragma warning(disable: 4035)
-DWORD getcpuid(DWORD arg, unsigned char result[16])
+extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16])
{
LIMITED_METHOD_CONTRACT
@@ -1709,7 +1714,7 @@ DWORD getcpuid(DWORD arg, unsigned char result[16])
// Arg3 is a pointer to the return buffer
// No need to check whether or not CPUID is supported because we have already called CPUID with success to come here.
-DWORD getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16])
+extern "C" DWORD __stdcall getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16])
{
LIMITED_METHOD_CONTRACT
@@ -1730,8 +1735,75 @@ DWORD getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16])
}
}
+extern "C" DWORD __stdcall xmmYmmStateSupport()
+{
+ // No CONTRACT
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
+ __asm
+ {
+ mov ecx, 0 ; Specify xcr0
+ xgetbv ; result in EDX:EAX
+ and eax, 06H
+ cmp eax, 06H ; check OS has enabled both XMM and YMM state support
+ jne not_supported
+ mov eax, 1
+ jmp done
+ not_supported:
+ mov eax, 0
+ done:
+ }
+}
+
#pragma warning(pop)
+#else // !FEATURE_PAL
+
+extern "C" DWORD __stdcall getcpuid(DWORD arg, unsigned char result[16])
+{
+ DWORD eax;
+ __asm(" xor %%ecx, %%ecx\n" \
+ " cpuid\n" \
+ " mov %%eax, 0(%[result])\n" \
+ " mov %%ebx, 4(%[result])\n" \
+ " mov %%ecx, 8(%[result])\n" \
+ " mov %%edx, 12(%[result])\n" \
+ : "=a"(eax) /*output in eax*/\
+ : "a"(arg), [result]"r"(result) /*inputs - arg in eax, result in any register*/\
+        : "ebx", "ecx", "edx", "memory" /* registers that are clobbered, *result is clobbered */
+ );
+ return eax;
+}
+
+extern "C" DWORD __stdcall getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16])
+{
+ DWORD eax;
+ __asm(" cpuid\n" \
+ " mov %%eax, 0(%[result])\n" \
+ " mov %%ebx, 4(%[result])\n" \
+ " mov %%ecx, 8(%[result])\n" \
+ " mov %%edx, 12(%[result])\n" \
+ : "=a"(eax) /*output in eax*/\
+ : "c"(arg1), "a"(arg2), [result]"r"(result) /*inputs - arg1 in ecx, arg2 in eax, result in any register*/\
+ : "eax", "rbx", "ecx", "edx", "memory" /* registers that are clobbered, *result is clobbered */
+ );
+ return eax;
+}
+
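+// xgetbv with ecx = 0 reads XCR0: bit 1 is SSE state, bit 2 is AVX state.
+// Both must be enabled by the OS (mask 0x06) before AVX can safely be used.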
+extern "C" DWORD __stdcall xmmYmmStateSupport()
+{
+ DWORD eax;
+ __asm(" xgetbv\n" \
+ : "=a"(eax) /*output in eax*/\
+ : "c"(0) /*inputs - 0 in ecx*/\
+        : "edx" /* registers that are clobbered */
+ );
+ // check OS has enabled both XMM and YMM state support
+ return ((eax & 0x06) == 0x06) ? 1 : 0;
+}
+
+#endif // !FEATURE_PAL
// This function returns the number of logical processors on a given physical chip. If it cannot
// determine the number of logical cpus, or the machine is not populated uniformly with the same
@@ -1761,13 +1833,14 @@ DWORD GetLogicalCpuCount()
PAL_TRY(Param *, pParam, &param)
{
unsigned char buffer[16];
+ DWORD* dwBuffer = NULL;
DWORD maxCpuId = getcpuid(0, buffer);
if (maxCpuId < 1)
goto lDone;
- DWORD* dwBuffer = (DWORD*)buffer;
+ dwBuffer = (DWORD*)buffer;
if (dwBuffer[1] == 'uneG') {
if (dwBuffer[3] == 'Ieni') {
diff --git a/src/vm/i386/excepcpu.h b/src/vm/i386/excepcpu.h
index 3f2f0810a7..ff540e784b 100644
--- a/src/vm/i386/excepcpu.h
+++ b/src/vm/i386/excepcpu.h
@@ -28,12 +28,37 @@ class Thread;
// Actually, the handler getting set is properly registered
#endif
+#ifdef FEATURE_PAL
+
+extern VOID SetSEHRecord(PEXCEPTION_REGISTRATION_RECORD record);
+extern VOID ResetSEHRecord(PEXCEPTION_REGISTRATION_RECORD record);
+
+#define INSTALL_SEH_RECORD(record) \
+ SetSEHRecord(record); \
+
+#define UNINSTALL_SEH_RECORD(record) \
+ ResetSEHRecord(record);
+
+#else // FEATURE_PAL
+
+#define INSTALL_SEH_RECORD(record) \
+ { \
+ (record)->Next = (PEXCEPTION_REGISTRATION_RECORD)__readfsdword(0); \
+ __writefsdword(0, (DWORD) (record)); \
+ }
+
+#define UNINSTALL_SEH_RECORD(record) \
+ { \
+ __writefsdword(0, (DWORD) ((record)->Next)); \
+ }
+
+#endif // FEATURE_PAL
+
#define INSTALL_EXCEPTION_HANDLING_RECORD(record) \
{ \
PEXCEPTION_REGISTRATION_RECORD __record = (record); \
_ASSERTE(__record < GetCurrentSEHRecord()); \
- __record->Next = (PEXCEPTION_REGISTRATION_RECORD)__readfsdword(0); \
- __writefsdword(0, (DWORD)__record); \
+ INSTALL_SEH_RECORD(record); \
}
//
@@ -44,7 +69,7 @@ class Thread;
{ \
PEXCEPTION_REGISTRATION_RECORD __record = (record); \
_ASSERTE(__record == GetCurrentSEHRecord()); \
- __writefsdword(0, (DWORD)__record->Next); \
+ UNINSTALL_SEH_RECORD(record); \
}
// stackOverwriteBarrier is used to detect overwriting of stack which will mess up handler registration
diff --git a/src/vm/i386/excepx86.cpp b/src/vm/i386/excepx86.cpp
index 27c923b749..71200f671f 100644
--- a/src/vm/i386/excepx86.cpp
+++ b/src/vm/i386/excepx86.cpp
@@ -19,7 +19,7 @@
#include "comutilnative.h"
#include "sigformat.h"
#include "siginfo.hpp"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow
#include "perfcounters.h"
#include "eventtrace.h"
@@ -53,13 +53,15 @@ VOID STDCALL ResumeAtJitEHHelper(EHContext *pContext);
int STDCALL CallJitEHFilterHelper(size_t *pShadowSP, EHContext *pContext);
VOID STDCALL CallJitEHFinallyHelper(size_t *pShadowSP, EHContext *pContext);
+typedef void (*RtlUnwindCallbackType)(void);
+
BOOL CallRtlUnwind(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
- void *callback,
+ RtlUnwindCallbackType callback,
EXCEPTION_RECORD *pExceptionRecord,
void *retval);
BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
- void *callback,
+ RtlUnwindCallbackType callback,
EXCEPTION_RECORD *pExceptionRecord,
void *retval);
}
@@ -371,6 +373,7 @@ CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread)
{
WRAPPER_NO_CONTRACT;
+#ifndef FEATURE_PAL
PCODE f_IP = GetIP(pContext);
if (Thread::IsAddrOfRedirectFunc((PVOID)f_IP)) {
@@ -427,22 +430,13 @@ CPFH_AdjustContextForThreadSuspensionRace(CONTEXT *pContext, Thread *pThread)
SetIP(pContext, GetIP(pThread->m_OSContext) - 1);
STRESS_LOG1(LF_EH, LL_INFO100, "CPFH_AdjustContextForThreadSuspensionRace: Case 4 setting IP = %x\n", pContext->Eip);
}
+#else
+ PORTABILITY_ASSERT("CPFH_AdjustContextForThreadSuspensionRace");
+#endif
}
#endif // FEATURE_HIJACK
-// We want to leave true null reference exceptions alone. But if we are
-// trashing memory, we don't want the application to swallow it. The 0x100
-// below will give us false positives for debugging, if the app is accessing
-// a field more than 256 bytes down an object, where the reference is null.
-//
-// Removed use of the IgnoreUnmanagedExceptions reg key...simply return false now.
-//
-static inline BOOL
-CPFH_ShouldIgnoreException(EXCEPTION_RECORD *pExceptionRecord) {
- LIMITED_METHOD_CONTRACT;
- return FALSE;
-}
static inline void
CPFH_UpdatePerformanceCounters() {
@@ -620,7 +614,7 @@ EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(EXCEPTION_REGISTRATION_REC
// This rethrow issue does not affect COMPLUS exceptions since we always create a brand new exception
// record for them in RaiseTheExceptionInternalOnly.
BOOL CallRtlUnwindSafe(EXCEPTION_REGISTRATION_RECORD *pEstablisherFrame,
- void *callback,
+ RtlUnwindCallbackType callback,
EXCEPTION_RECORD *pExceptionRecord,
void *retval)
{
@@ -1153,6 +1147,7 @@ CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc.
pExInfo->m_pExceptionPointers = &exceptionPointers;
+#ifndef FEATURE_PAL
if (bRethrownException || bNestedException)
{
_ASSERTE(pExInfo->m_pPrevNestedInfo != NULL);
@@ -1161,6 +1156,7 @@ CPFH_RealFirstPassHandler( // ExceptionContinueSearch, etc.
SetStateForWatsonBucketing(bRethrownException, pExInfo->GetPreviousExceptionTracker()->GetThrowableAsHandle());
END_SO_INTOLERANT_CODE;
}
+#endif
#ifdef DEBUGGING_SUPPORTED
//
@@ -1975,11 +1971,17 @@ PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext)
}
#if !defined(DACCESS_COMPILE)
+#ifdef FEATURE_PAL
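+// Note: in this PAL port the SEH record chain hangs off a single file-static
+// slot rather than the per-thread fs:[0] chain used on Windows.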
+static PEXCEPTION_REGISTRATION_RECORD CurrentSEHRecord = EXCEPTION_CHAIN_END;
+#endif
PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord()
{
WRAPPER_NO_CONTRACT;
+#ifdef FEATURE_PAL
+ LPVOID fs0 = CurrentSEHRecord;
+#else // FEATURE_PAL
LPVOID fs0 = (LPVOID)__readfsdword(0);
#if 0 // This walk is too expensive considering we hit it every time we a CONTRACT(NOTHROW)
@@ -2010,19 +2012,39 @@ PEXCEPTION_REGISTRATION_RECORD GetCurrentSEHRecord()
pEHR = pEHR->Next;
}
#endif
-#endif
+#endif // 0
+#endif // FEATURE_PAL
return (EXCEPTION_REGISTRATION_RECORD*) fs0;
}
+#ifdef FEATURE_PAL
+VOID SetSEHRecord(PEXCEPTION_REGISTRATION_RECORD record)
+{
+ WRAPPER_NO_CONTRACT;
+ record->Next = CurrentSEHRecord;
+ CurrentSEHRecord = record;
+}
+
+VOID ResetSEHRecord(PEXCEPTION_REGISTRATION_RECORD record)
+{
+ CurrentSEHRecord = record->Next;
+}
+#endif // FEATURE_PAL
+
PEXCEPTION_REGISTRATION_RECORD GetFirstCOMPlusSEHRecord(Thread *pThread) {
WRAPPER_NO_CONTRACT;
+#ifndef FEATURE_PAL
EXCEPTION_REGISTRATION_RECORD *pEHR = *(pThread->GetExceptionListPtr());
if (pEHR == EXCEPTION_CHAIN_END || IsUnmanagedToManagedSEHHandler(pEHR)) {
return pEHR;
} else {
return GetNextCOMPlusSEHRecord(pEHR);
}
+#else // FEATURE_PAL
+ PORTABILITY_ASSERT("GetFirstCOMPlusSEHRecord");
+ return NULL;
+#endif // FEATURE_PAL
}
@@ -2048,7 +2070,11 @@ PEXCEPTION_REGISTRATION_RECORD GetPrevSEHRecord(EXCEPTION_REGISTRATION_RECORD *n
VOID SetCurrentSEHRecord(EXCEPTION_REGISTRATION_RECORD *pSEH)
{
WRAPPER_NO_CONTRACT;
+#ifndef FEATURE_PAL
*GetThread()->GetExceptionListPtr() = pSEH;
+#else // FEATURE_PAL
+    _ASSERTE(!"NYI");
+#endif // FEATURE_PAL
}
@@ -2085,6 +2111,7 @@ BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
STATIC_CONTRACT_GC_NOTRIGGER;
STATIC_CONTRACT_SO_TOLERANT;
+#ifndef FEATURE_PAL
PEXCEPTION_REGISTRATION_RECORD pEHR = GetCurrentSEHRecord();
while ((LPVOID)pEHR < pTargetSP)
@@ -2140,6 +2167,10 @@ BOOL PopNestedExceptionRecords(LPVOID pTargetSP, BOOL bCheckForUnknownHandlers)
SetCurrentSEHRecord(pEHR);
}
return FALSE;
+#else // FEATURE_PAL
+ PORTABILITY_ASSERT("PopNestedExceptionRecords");
+ return FALSE;
+#endif // FEATURE_PAL
}
//
@@ -2245,6 +2276,7 @@ int COMPlusThrowCallbackHelper(IJitManager *pJitManager,
int iFilt = 0;
BOOL impersonating = FALSE;
+#ifndef FEATURE_PAL
EX_TRY
{
GCPROTECT_BEGIN (throwable);
@@ -2295,6 +2327,10 @@ int COMPlusThrowCallbackHelper(IJitManager *pJitManager,
EX_END_CATCH(SwallowAllExceptions)
return iFilt;
+#else // FEATURE_PAL
+ PORTABILITY_ASSERT("COMPlusThrowCallbackHelper");
+ return EXCEPTION_CONTINUE_SEARCH;
+#endif // FEATURE_PAL
}
//******************************************************************************
@@ -2409,6 +2445,7 @@ StackWalkAction COMPlusThrowCallback( // SWA value
pData->bSkipLastElement = FALSE;
}
+#ifndef FEATURE_PAL
// Check for any impersonation on the frame and save that for use during EH filter callbacks
OBJECTREF* pRefSecDesc = pCf->GetAddrOfSecurityObject();
if (pRefSecDesc != NULL && *pRefSecDesc != NULL)
@@ -2427,6 +2464,7 @@ StackWalkAction COMPlusThrowCallback( // SWA value
}
}
}
+#endif // !FEATURE_PAL
// now we've got the stack trace, if we aren't allowed to catch this and we're first pass, return
if (pData->bDontCatch)
@@ -2604,9 +2642,9 @@ StackWalkAction COMPlusThrowCallback( // SWA value
// EX_CATCH just above us. If not, the exception
if ( IsFilterHandler(&EHClause)
&& ( offs > EHClause.FilterOffset
- || offs == EHClause.FilterOffset && !start_adjust)
+ || (offs == EHClause.FilterOffset && !start_adjust) )
&& ( offs < EHClause.HandlerStartPC
- || offs == EHClause.HandlerStartPC && !end_adjust)) {
+ || (offs == EHClause.HandlerStartPC && !end_adjust) )) {
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusThrowCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n",
EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust);
@@ -2978,9 +3016,9 @@ StackWalkAction COMPlusUnwindCallback (CrawlFrame *pCf, ThrowCallbackType *pData
if ( IsFilterHandler(&EHClause)
&& ( offs > EHClause.FilterOffset
- || offs == EHClause.FilterOffset && !start_adjust)
+ || (offs == EHClause.FilterOffset && !start_adjust) )
&& ( offs < EHClause.HandlerStartPC
- || offs == EHClause.HandlerStartPC && !end_adjust)
+ || (offs == EHClause.HandlerStartPC && !end_adjust) )
) {
STRESS_LOG4(LF_EH, LL_INFO100, "COMPlusUnwindCallback: Fault inside filter [%d,%d] startAdj %d endAdj %d\n",
EHClause.FilterOffset, EHClause.HandlerStartPC, start_adjust, end_adjust);
@@ -3731,4 +3769,10 @@ AdjustContextForVirtualStub(
return TRUE;
}
+#ifdef FEATURE_PAL
+VOID DECLSPEC_NORETURN DispatchManagedException(PAL_SEHException& ex, bool isHardwareException)
+{
+ UNREACHABLE();
+}
+#endif
#endif // !DACCESS_COMPILE
diff --git a/src/vm/i386/gmsasm.S b/src/vm/i386/gmsasm.S
new file mode 100644
index 0000000000..1e43fd281f
--- /dev/null
+++ b/src/vm/i386/gmsasm.S
@@ -0,0 +1,28 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+// int __fastcall LazyMachStateCaptureState(struct LazyMachState *pState);
+LEAF_ENTRY LazyMachStateCaptureState, _TEXT
+ // marks that this is not yet valid
+ mov dword ptr [ecx+MachState__pRetAddr], 0
+
+ // remember register values
+ mov [ecx+MachState__edi], edi
+ mov [ecx+MachState__esi], esi
+ mov [ecx+MachState__ebx], ebx
+ mov [ecx+LazyMachState_captureEbp], ebp
+ mov [ecx+LazyMachState_captureEsp], esp
+
+ // capture return address
+ mov eax, [esp]
+ mov dword ptr [ecx+LazyMachState_captureEip], eax
+
+ // return 0
+ xor eax, eax
+ ret
+LEAF_END LazyMachStateCaptureState, _TEXT
diff --git a/src/vm/i386/gmsx86.cpp b/src/vm/i386/gmsx86.cpp
index e7e16b70ab..34d65856fe 100644
--- a/src/vm/i386/gmsx86.cpp
+++ b/src/vm/i386/gmsx86.cpp
@@ -9,6 +9,11 @@
#include "common.h"
#include "gmscpu.h"
+#ifdef FEATURE_PAL
+#define USE_EXTERNAL_UNWINDER
+#endif
+
+#ifndef USE_EXTERNAL_UNWINDER
/***************************************************************/
/* setMachState figures out what the state of the CPU will be
when the function that calls 'setMachState' returns. It stores
@@ -42,18 +47,31 @@
#if !defined(DACCESS_COMPILE)
+#ifdef _MSC_VER
#pragma optimize("gsy", on ) // optimize to insure that code generation does not have junk in it
+#endif // _MSC_VER
#pragma warning(disable:4717)
static int __stdcall zeroFtn() {
return 0;
}
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Winfinite-recursion"
+#endif
+
static int __stdcall recursiveFtn() {
return recursiveFtn()+1;
}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif
+
+#ifdef _MSC_VER
#pragma optimize("", on )
+#endif // _MSC_VER
/* Has mscorwks been instrumented so that calls are morphed into push XXXX call <helper> */
@@ -670,6 +688,10 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState,
ip += 2;
break;
+ case 0x34: // XOR AL, imm8
+ ip += 2;
+ break;
+
case 0x31:
case 0x32:
case 0x33:
@@ -866,6 +888,10 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState,
datasize = b16bit?2:4;
goto decodeRM;
+ case 0x24: // AND AL, imm8
+ ip += 2;
+ break;
+
case 0x01: // ADD mod/rm
case 0x03:
case 0x29: // SUB mod/rm
@@ -1108,7 +1134,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState,
goto again;
}
#ifndef _PREFIX_
- *((int*) 0) = 1; // If you get at this error, it is because yout
+ *((volatile int*) 0) = 1; // If you hit this error, it is because you
// set a breakpoint in a helper method frame epilog.
// You can't do that, unfortunately. Just move it
// into the interior of the method to fix it.
@@ -1225,7 +1251,7 @@ void LazyMachState::unwindLazyState(LazyMachState* baseState,
// FIX what to do here?
#ifndef DACCESS_COMPILE
#ifndef _PREFIX_
- *((unsigned __int8**) 0) = ip; // cause an access violation (Free Build assert)
+ *((volatile PTR_BYTE*) 0) = ip; // cause an access violation (Free Build assert)
#endif // !_PREFIX_
#else
DacNotImpl();
@@ -1243,3 +1269,109 @@ done:
#ifdef _PREFAST_
#pragma warning(pop)
#endif
+#else // !USE_EXTERNAL_UNWINDER
+
+void LazyMachState::unwindLazyState(LazyMachState* baseState,
+ MachState* lazyState,
+ DWORD threadId,
+ int funCallDepth /* = 1 */,
+ HostCallPreference hostCallPreference /* = (HostCallPreference)(-1) */)
+{
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ SO_TOLERANT;
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+ CONTEXT ctx;
+ KNONVOLATILE_CONTEXT_POINTERS nonVolRegPtrs;
+
+ ctx.Eip = baseState->captureEip;
+ ctx.Esp = baseState->captureEsp;
+ ctx.Ebp = baseState->captureEbp;
+
+ ctx.Edi = lazyState->_edi = baseState->_edi;
+ ctx.Esi = lazyState->_esi = baseState->_esi;
+ ctx.Ebx = lazyState->_ebx = baseState->_ebx;
+
+ nonVolRegPtrs.Edi = &(lazyState->_edi);
+ nonVolRegPtrs.Esi = &(lazyState->_esi);
+ nonVolRegPtrs.Ebx = &(lazyState->_ebx);
+ nonVolRegPtrs.Ebp = &(lazyState->_ebp);
+
+ PCODE pvControlPc;
+
+ LOG((LF_GCROOTS, LL_INFO100000, "STACKWALK LazyMachState::unwindLazyState(ip:%p,bp:%p,sp:%p)\n", baseState->captureEip, baseState->captureEbp, baseState->captureEsp));
+
+ do
+ {
+#ifdef DACCESS_COMPILE
+ HRESULT hr = DacVirtualUnwind(threadId, &ctx, &nonVolRegPtrs);
+ if (FAILED(hr))
+ {
+ DacError(hr);
+ }
+#else
+ BOOL success = PAL_VirtualUnwind(&ctx, &nonVolRegPtrs);
+ if (!success)
+ {
+ _ASSERTE(!"unwindLazyState: Unwinding failed");
+ EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE);
+ }
+#endif // DACCESS_COMPILE
+
+ pvControlPc = GetIP(&ctx);
+
+ if (funCallDepth > 0)
+ {
+ --funCallDepth;
+ if (funCallDepth == 0)
+ break;
+ }
+ else
+ {
+ // Determine whether given IP resides in JITted code. (It returns nonzero in that case.)
+ // Use it now to see if we've unwound to managed code yet.
+ BOOL fFailedReaderLock = FALSE;
+ BOOL fIsManagedCode = ExecutionManager::IsManagedCode(pvControlPc, hostCallPreference, &fFailedReaderLock);
+ if (fFailedReaderLock)
+ {
+ // We don't know if we would have been able to find a JIT
+ // manager, because we couldn't enter the reader lock without
+ // yielding (and our caller doesn't want us to yield). So abort
+ // now.
+
+ // Invalidate the lazyState we're returning, so the caller knows
+ // we aborted before we could fully unwind
+ lazyState->_pRetAddr = NULL;
+ return;
+ }
+
+ if (fIsManagedCode)
+ break;
+ }
+ }
+ while(TRUE);
+
+ lazyState->_esp = ctx.Esp;
+ lazyState->_pRetAddr = PTR_TADDR(lazyState->_esp - 4);
+
+ lazyState->_edi = ctx.Edi;
+ lazyState->_esi = ctx.Esi;
+ lazyState->_ebx = ctx.Ebx;
+ lazyState->_ebp = ctx.Ebp;
+
+#ifdef DACCESS_COMPILE
+ lazyState->_pEdi = NULL;
+ lazyState->_pEsi = NULL;
+ lazyState->_pEbx = NULL;
+ lazyState->_pEbp = NULL;
+#else // DACCESS_COMPILE
+ lazyState->_pEdi = nonVolRegPtrs.Edi;
+ lazyState->_pEsi = nonVolRegPtrs.Esi;
+ lazyState->_pEbx = nonVolRegPtrs.Ebx;
+ lazyState->_pEbp = nonVolRegPtrs.Ebp;
+#endif // DACCESS_COMPILE
+}
+#endif // !USE_EXTERNAL_UNWINDER
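
[Editor's note] On FEATURE_PAL the hand-written x86 epilog walker above is replaced wholesale: LazyMachStateCaptureState (gmsasm.S, earlier in this diff) takes a cheap register snapshot and zeroes the return-address slot to mark the state "not yet unwound", and this unwindLazyState pays for PAL_VirtualUnwind only when the state is actually consumed. A self-contained sketch of that capture-cheap/unwind-lazily pattern; every name here is hypothetical, only the protocol mirrors the code above:

#include <cstdint>

struct MachStateSketch
{
    uintptr_t retAddr = 0;  // 0 == "not valid yet", as the asm stub writes
    uintptr_t esp = 0;
};

static void ExpensiveVirtualUnwind(MachStateSketch& s)
{
    // Stand-in for the PAL_VirtualUnwind loop that walks frames
    // until managed code (or the requested call depth) is reached.
    s.retAddr = 1;
    s.esp = 0x1000;
}

uintptr_t GetReturnAddress(MachStateSketch& s)
{
    if (s.retAddr == 0)             // lazily materialize the unwound state
        ExpensiveVirtualUnwind(s);
    return s.retAddr;
}
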
diff --git a/src/vm/i386/jithelp.S b/src/vm/i386/jithelp.S
new file mode 100644
index 0000000000..66ae9fb451
--- /dev/null
+++ b/src/vm/i386/jithelp.S
@@ -0,0 +1,749 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+
+// ***
+// JIT_WriteBarrier* - GC write barrier helper
+//
+// Purpose:
+// Helper calls in order to assign an object to a field
+// Enables book-keeping of the GC.
+//
+// Entry:
+// EDX - address of ref-field (assigned to)
+// the resp. other reg - RHS of assignment
+//
+// Exit:
+//
+// Uses:
+// EDX is destroyed.
+//
+// Exceptions:
+//
+// *******************************************************************************
+
+// The code here is tightly coupled with AdjustContextForWriteBarrier, if you change
+// anything here, you might need to change AdjustContextForWriteBarrier as well
+.macro WriteBarrierHelper rg
+.align 4
+
+// The entry point is the fully 'safe' one in which we check if EDX (the REF
+// being updated) is actually in the GC heap
+NESTED_ENTRY JIT_CheckedWriteBarrier\rg, _TEXT, NoHandler
+ // check if the REF being updated is in the GC heap
+ push eax
+ PREPARE_EXTERNAL_VAR g_lowest_address, eax
+ cmp edx, [eax]
+ pop eax
+ jb LOCAL_LABEL(WriteBarrier_NotInHeap_\rg)
+ push eax
+ PREPARE_EXTERNAL_VAR g_highest_address, eax
+ cmp edx, [eax]
+ pop eax
+ jae LOCAL_LABEL(WriteBarrier_NotInHeap_\rg)
+
+ // fall through to unchecked routine
+ // note that its entry point also happens to be aligned
+
+#ifdef WRITE_BARRIER_CHECK
+ // This entry point is used when you know the REF pointer being updated
+ // is in the GC heap
+PATCH_LABEL JIT_DebugWriteBarrier\rg
+#endif // WRITE_BARRIER_CHECK
+
+#ifdef _DEBUG
+ push edx
+ push ecx
+ push eax
+
+ push \rg
+ push edx
+ call C_FUNC(WriteBarrierAssert)
+
+ pop eax
+ pop ecx
+ pop edx
+#endif // _DEBUG
+
+ // in the !WRITE_BARRIER_CHECK case this will be the move for all
+ // addresses in the GC heap; addresses outside the GC heap are
+ // taken care of below at WriteBarrier_NotInHeap_\rg
+
+#ifndef WRITE_BARRIER_CHECK
+ mov DWORD PTR [edx], \rg
+#endif // !WRITE_BARRIER_CHECK
+
+#ifdef WRITE_BARRIER_CHECK
+ // Test dest here so that if it is bad, an AV happens before we change register/stack
+ // state. This makes the job of AdjustContextForWriteBarrier easier.
+ cmp BYTE PTR [edx], 0
+ // ALSO update the shadow GC heap if that is enabled
+ // Make ebp into the temporary src register. We need to do this so that we can use ecx
+ // in the calculation of the shadow GC address, but still have access to the src register
+ push ecx
+ push ebp
+ mov ebp, \rg
+
+ // if g_GCShadow is 0, don't perform the check
+ push eax
+ PREPARE_EXTERNAL_VAR g_GCShadow, eax
+ cmp DWORD PTR [eax], 0
+ pop eax
+ je LOCAL_LABEL(WriteBarrier_NoShadow_\rg)
+
+ mov ecx, edx
+ push eax
+ PREPARE_EXTERNAL_VAR g_lowest_address, eax
+ sub ecx, [eax]
+ pop eax
+ jb LOCAL_LABEL(WriteBarrier_NoShadow_\rg)
+ push edx
+ PREPARE_EXTERNAL_VAR g_GCShadow, edx
+ add ecx, [edx] // ecx = (dst - g_lowest_address) + g_GCShadow
+ PREPARE_EXTERNAL_VAR g_GCShadowEnd, edx
+ cmp ecx, [edx]
+ pop edx
+ ja LOCAL_LABEL(WriteBarrier_NoShadow_\rg)
+
+ // TODO: In Orcas timeframe if we move to P4+ only on X86 we should enable
+ // mfence barriers on either side of these two writes to make sure that
+ // they stay as close together as possible
+
+ // edx contains address in GC
+ // ecx contains address in ShadowGC
+ // ebp temporarily becomes the src register
+
+ // When we're writing to the shadow GC heap we want to be careful to minimize
+ // the risk of a race that can occur here where the GC and ShadowGC don't match
+ mov DWORD PTR [edx], ebp
+ mov DWORD PTR [ecx], ebp
+
+ // We need a scratch register to verify the shadow heap. We also need to
+ // construct a memory barrier so that the write to the shadow heap happens
+ // before the read from the GC heap. We can do both by using SUB/XCHG
+ // rather than PUSH.
+ //
+ // TODO: Should be changed to a push if the mfence described above is added.
+ //
+ sub esp, 4
+ xchg [esp], eax
+
+ // As part of our race avoidance (see above) we will now check whether the values
+ // in the GC and ShadowGC match. There is a possibility that we're wrong here but
+ // being overaggressive means we might mask a case where someone updates GC refs
+ // without going to a write barrier, but by its nature it will be indeterminate
+ // and we will find real bugs whereas the current implementation is indeterminate
+ // but only leads to investigations that find that this code is fundamentally flawed
+ mov eax, [edx]
+ cmp [ecx], eax
+ je LOCAL_LABEL(WriteBarrier_CleanupShadowCheck_\rg)
+ mov DWORD PTR [ecx], INVALIDGCVALUE
+
+LOCAL_LABEL(WriteBarrier_CleanupShadowCheck_\rg):
+ pop eax
+
+ jmp LOCAL_LABEL(WriteBarrier_ShadowCheckEnd_\rg)
+
+LOCAL_LABEL(WriteBarrier_NoShadow_\rg):
+ // If we come here then we haven't written the value to the GC and need to.
+ // ebp contains rg
+ // We restore ebp/ecx immediately after this, and if either of them is the src
+ // register it will regain its value as the src register.
+ mov DWORD PTR [edx], ebp
+LOCAL_LABEL(WriteBarrier_ShadowCheckEnd_\rg):
+ pop ebp
+ pop ecx
+#endif // WRITE_BARRIER_CHECK
+
+ push eax
+ push ebx
+ mov eax, \rg
+ PREPARE_EXTERNAL_VAR g_ephemeral_low, ebx
+ cmp eax, [ebx]
+ pop ebx
+ pop eax
+ jb LOCAL_LABEL(WriteBarrier_NotInEphemeral_\rg)
+ push eax
+ push ebx
+ mov eax, \rg
+ PREPARE_EXTERNAL_VAR g_ephemeral_high, ebx
+ cmp eax, [ebx]
+ pop ebx
+ pop eax
+ jae LOCAL_LABEL(WriteBarrier_NotInEphemeral_\rg)
+
+ shr edx, 10
+ push eax
+ PREPARE_EXTERNAL_VAR g_card_table, eax
+ add edx, [eax]
+ pop eax
+ cmp BYTE PTR [edx], 0FFh
+ jne LOCAL_LABEL(WriteBarrier_UpdateCardTable_\rg)
+ ret
+
+LOCAL_LABEL(WriteBarrier_UpdateCardTable_\rg):
+ mov BYTE PTR [edx], 0FFh
+ ret
+
+LOCAL_LABEL(WriteBarrier_NotInHeap_\rg):
+ // If it wasn't in the heap then we haven't updated the dst in memory yet
+ mov DWORD PTR [edx], \rg
+
+LOCAL_LABEL(WriteBarrier_NotInEphemeral_\rg):
+ // If it is in the GC Heap but isn't in the ephemeral range we've already
+ // updated the Heap with the Object*.
+ ret
+NESTED_END JIT_CheckedWriteBarrier\rg, _TEXT
+
+.endm
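
[Editor's note] Expanded for one register, the fast path of the macro above reduces to a few lines of C++. This hedged sketch uses plain globals as stand-ins for the runtime's patched g_* symbols (so it compiles on its own; the declarations are assumptions) and shows the three checks in order: heap range, ephemeral range, card already dirty.

#include <cstdint>

static uint8_t* g_lowest_address;   // stand-ins for the runtime's globals
static uint8_t* g_highest_address;
static uint8_t* g_ephemeral_low;
static uint8_t* g_ephemeral_high;
static uint8_t* g_card_table;

void CheckedWriteBarrierSketch(uint8_t** dst, uint8_t* ref)
{
    *dst = ref;                                        // the store itself
    if ((uint8_t*)dst < g_lowest_address ||
        (uint8_t*)dst >= g_highest_address)
        return;                                        // dst not in the GC heap
    if (ref < g_ephemeral_low || ref >= g_ephemeral_high)
        return;                                        // only young refs need a card
    uint8_t* card = g_card_table + ((uintptr_t)dst >> 10);
    if (*card != 0xFF)                                 // skip the write if already dirty
        *card = 0xFF;                                  // this card may hold a young ref
}
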
+
+
+// ***
+// JIT_ByRefWriteBarrier* - GC write barrier helper
+//
+// Purpose:
+// Helper calls in order to assign an object to a byref field
+// Enables book-keeping of the GC.
+//
+// Entry:
+// EDI - address of ref-field (assigned to)
+// ESI - address of the data (source)
+// ECX can be trashed
+//
+// Exit:
+//
+// Uses:
+// EDI and ESI are incremented by a DWORD
+//
+// Exceptions:
+//
+// *******************************************************************************
+//
+// The code here is tightly coupled with AdjustContextForWriteBarrier, if you change
+// anything here, you might need to change AdjustContextForWriteBarrier as well
+//
+.macro ByRefWriteBarrierHelper
+.align 4
+
+LEAF_ENTRY JIT_ByRefWriteBarrier, _TEXT
+ // test for dest in range
+ mov ecx, [esi]
+ push eax
+ PREPARE_EXTERNAL_VAR g_lowest_address, eax
+ cmp edi, [eax]
+ pop eax
+ jb LOCAL_LABEL(ByRefWriteBarrier_NotInHeap)
+ push eax
+ PREPARE_EXTERNAL_VAR g_highest_address, eax
+ cmp edi, [eax]
+ pop eax
+ jae LOCAL_LABEL(ByRefWriteBarrier_NotInHeap)
+
+#ifndef WRITE_BARRIER_CHECK
+ // write barrier
+ mov [edi], ecx
+#endif // !WRITE_BARRIER_CHECK
+
+#ifdef WRITE_BARRIER_CHECK
+ // Test dest here so that if it is bad, an AV happens before we change register/stack
+ // state. This makes the job of AdjustContextForWriteBarrier easier.
+ cmp BYTE PTR [edi], 0
+
+ // ALSO update the shadow GC heap if that is enabled
+
+ // use edx for address in GC Shadow,
+ push edx
+
+ // if g_GCShadow is 0, don't do the update
+ push ebx
+ PREPARE_EXTERNAL_VAR g_GCShadow, ebx
+ cmp DWORD PTR [ebx], 0
+ pop ebx
+ je LOCAL_LABEL(ByRefWriteBarrier_NoShadow)
+
+ mov edx, edi
+ push ebx
+ PREPARE_EXTERNAL_VAR g_lowest_address, ebx
+ sub edx, [ebx] // U/V
+ pop ebx
+ jb LOCAL_LABEL(ByRefWriteBarrier_NoShadow)
+ push eax
+ PREPARE_EXTERNAL_VAR g_GCShadow, eax
+ mov eax, [eax]
+ add edx, eax // edx = (dst - g_lowest_address) + g_GCShadow
+ PREPARE_EXTERNAL_VAR g_GCShadowEnd, eax
+ mov eax, [eax]
+ cmp edx, eax
+ pop eax
+ ja LOCAL_LABEL(ByRefWriteBarrier_NoShadow)
+
+ // TODO: In Orcas timeframe if we move to P4+ only on X86 we should enable
+ // mfence barriers on either side of these two writes to make sure that
+ // they stay as close together as possible
+
+ // edi contains address in GC
+ // edx contains address in ShadowGC
+ // ecx is the value to assign
+
+ // When we're writing to the shadow GC heap we want to be careful to minimize
+ // the risk of a race that can occur here where the GC and ShadowGC don't match
+ mov DWORD PTR [edi], ecx
+ mov DWORD PTR [edx], ecx
+
+ // We need a scratch register to verify the shadow heap. We also need to
+ // construct a memory barrier so that the write to the shadow heap happens
+ // before the read from the GC heap. We can do both by using SUB/XCHG
+ // rather than PUSH.
+ //
+ // TODO: Should be changed to a push if the mfence described above is added.
+ //
+ sub esp, 4
+ xchg [esp], eax
+
+ // As part of our race avoidance (see above) we will now check whether the values
+ // in the GC and ShadowGC match. There is a possibility that we're wrong here but
+ // being overaggressive means we might mask a case where someone updates GC refs
+ // without going to a write barrier, but by its nature it will be indeterminate
+ // and we will find real bugs whereas the current implementation is indeterminate
+ // but only leads to investigations that find that this code is fundamentally flawed
+
+ mov eax, [edi]
+ cmp [edx], eax
+ je LOCAL_LABEL(ByRefWriteBarrier_CleanupShadowCheck)
+ mov DWORD PTR [edx], INVALIDGCVALUE
+LOCAL_LABEL(ByRefWriteBarrier_CleanupShadowCheck):
+ pop eax
+ jmp LOCAL_LABEL(ByRefWriteBarrier_ShadowCheckEnd)
+
+LOCAL_LABEL(ByRefWriteBarrier_NoShadow):
+ // If we come here then we haven't written the value to the GC and need to.
+ mov DWORD PTR [edi], ecx
+
+LOCAL_LABEL(ByRefWriteBarrier_ShadowCheckEnd):
+ pop edx
+#endif // WRITE_BARRIER_CHECK
+
+ // test for *src in the ephemeral segment
+ push eax
+ PREPARE_EXTERNAL_VAR g_ephemeral_low, eax
+ cmp ecx, [eax]
+ pop eax
+ jb LOCAL_LABEL(ByRefWriteBarrier_NotInEphemeral)
+ push eax
+ PREPARE_EXTERNAL_VAR g_ephemeral_high, eax
+ cmp ecx, [eax]
+ pop eax
+ jae LOCAL_LABEL(ByRefWriteBarrier_NotInEphemeral)
+
+ mov ecx, edi
+ add esi, 4
+ add edi, 4
+
+ shr ecx, 10
+ push eax
+ PREPARE_EXTERNAL_VAR g_card_table, eax
+ add ecx, [eax]
+ pop eax
+ cmp BYTE PTR [ecx], 0FFh
+ jne LOCAL_LABEL(ByRefWriteBarrier_UpdateCardTable)
+ ret
+LOCAL_LABEL(ByRefWriteBarrier_UpdateCardTable):
+ mov BYTE PTR [ecx], 0FFh
+ ret
+
+LOCAL_LABEL(ByRefWriteBarrier_NotInHeap):
+ // If it wasn't in the heap then we haven't updated the dst in memory yet
+ mov [edi], ecx
+LOCAL_LABEL(ByRefWriteBarrier_NotInEphemeral):
+ // If it is in the GC Heap but isn't in the ephemeral range we've already
+ // updated the Heap with the Object*.
+ add esi, 4
+ add edi, 4
+ ret
+NESTED_END JIT_ByRefWriteBarrier, _TEXT
+
+.endm
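
[Editor's note] The byref flavor is the same card-marking logic driven from a copy loop: the value is read through ESI, written through EDI, and both cursors advance by a DWORD so the JIT can invoke it once per reference field during a struct or array copy. In terms of the previous hedged sketch (cursor types simplified):

// Reuses CheckedWriteBarrierSketch from the previous example.
void ByRefWriteBarrierSketch(uint8_t*** pDst, uint8_t*** pSrc)
{
    CheckedWriteBarrierSketch(*pDst, **pSrc); // copy one slot, with card marking
    ++*pSrc;                                  // ESI += 4
    ++*pDst;                                  // EDI += 4
}
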
+
+// WriteBarrierStart and WriteBarrierEnd are used to determine bounds of
+// WriteBarrier functions so we can determine if an AV occurred in one of them.
+//
+LEAF_ENTRY JIT_WriteBarrierStart, _TEXT
+ ret
+LEAF_END JIT_WriteBarrierStart, _TEXT
+
+#ifdef FEATURE_USE_ASM_GC_WRITE_BARRIERS
+// *******************************************************************************
+// Write barrier wrappers with fcall calling convention
+//
+.macro UniversalWriteBarrierHelper name
+.align 4
+
+LEAF_ENTRY JIT_\name, _TEXT
+ mov eax, edx
+ mov edx, ecx
+ jmp C_FUNC(JIT_\name\()EAX)
+LEAF_END JIT_\name, _TEXT
+
+.endm
+
+// Only define these if we're using the ASM GC write barriers; if this flag is not defined,
+// we'll use C++ versions of these write barriers.
+UniversalWriteBarrierHelper CheckedWriteBarrier
+UniversalWriteBarrierHelper WriteBarrier
+#endif // FEATURE_USE_ASM_GC_WRITE_BARRIERS
+
+WriteBarrierHelper EAX
+WriteBarrierHelper EBX
+WriteBarrierHelper ECX
+WriteBarrierHelper ESI
+WriteBarrierHelper EDI
+WriteBarrierHelper EBP
+
+ByRefWriteBarrierHelper
+
+LEAF_ENTRY JIT_WriteBarrierLast, _TEXT
+ ret
+LEAF_END JIT_WriteBarrierLast, _TEXT
+
+// This is the first function outside the "keep together range". Used by BBT scripts.
+LEAF_ENTRY JIT_WriteBarrierEnd, _TEXT
+ ret
+LEAF_END JIT_WriteBarrierEnd, _TEXT
+
+// *********************************************************************/
+// In cases where we support it we have an optimized GC Poll callback.
+// Normally (when we're not trying to suspend for GC), the CORINFO_HELP_POLL_GC
+// helper points to this nop routine. When we're ready to suspend for GC,
+// we whack the Jit Helper table entry to point to the real helper. When we're
+// done with GC we whack it back.
+LEAF_ENTRY JIT_PollGC_Nop, _TEXT
+ ret
+LEAF_END JIT_PollGC_Nop, _TEXT
+
+// *********************************************************************/
+// llshl - long shift left
+//
+// Purpose:
+// Does a Long Shift Left (signed and unsigned are identical)
+// Shifts a long left any number of bits.
+//
+// NOTE: This routine has been adapted from the Microsoft CRTs.
+//
+// Entry:
+// EDX:EAX - long value to be shifted
+// ECX - number of bits to shift by
+//
+// Exit:
+// EDX:EAX - shifted value
+//
+.align 16
+LEAF_ENTRY JIT_LLsh, _TEXT
+ cmp ecx, 32
+ jae LOCAL_LABEL(LLshMORE32)
+
+ // Handle shifts of between bits 0 and 31
+ shld edx, eax, cl
+ shl eax, cl
+ ret
+
+LOCAL_LABEL(LLshMORE32):
+ // Handle shifts of between bits 32 and 63
+ // The x86 shift instructions only use the lower 5 bits.
+ mov edx, eax
+ xor eax, eax
+ shl edx, cl
+ ret
+LEAF_END JIT_LLsh, _TEXT
+
+// *********************************************************************/
+// LRsh - long shift right
+//
+// Purpose:
+// Does a signed Long Shift Right
+// Shifts a long right any number of bits.
+//
+// NOTE: This routine has been adapted from the Microsoft CRTs.
+//
+// Entry:
+// EDX:EAX - long value to be shifted
+// ECX - number of bits to shift by
+//
+// Exit:
+// EDX:EAX - shifted value
+//
+.align 16
+LEAF_ENTRY JIT_LRsh, _TEXT
+ cmp ecx, 32
+ jae LOCAL_LABEL(LRshMORE32)
+
+ // Handle shifts of between bits 0 and 31
+ shrd eax, edx, cl
+ sar edx, cl
+ ret
+
+LOCAL_LABEL(LRshMORE32):
+ // Handle shifts of between bits 32 and 63
+ // The x86 shift instructions only use the lower 5 bits.
+ mov eax, edx
+ sar edx, 31
+ sar eax, cl
+ ret
+LEAF_END JIT_LRsh, _TEXT
+
+// *********************************************************************/
+// LRsz:
+// Purpose:
+// Does an unsigned Long Shift Right
+// Shifts a long right any number of bits.
+//
+// NOTE: This routine has been adapted from the Microsoft CRTs.
+//
+// Entry:
+// EDX:EAX - long value to be shifted
+// ECX - number of bits to shift by
+//
+// Exit:
+// EDX:EAX - shifted value
+//
+.align 16
+LEAF_ENTRY JIT_LRsz, _TEXT
+ cmp ecx, 32
+ jae LOCAL_LABEL(LRszMORE32)
+
+ // Handle shifts of between bits 0 and 31
+ shrd eax, edx, cl
+ shr edx, cl
+ ret
+
+LOCAL_LABEL(LRszMORE32):
+ // Handle shifts of between bits 32 and 63
+ // The x86 shift instructions only use the lower 5 bits.
+ mov eax, edx
+ xor edx, edx
+ shr eax, cl
+ ret
+LEAF_END JIT_LRsz, _TEXT
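
[Editor's note] All three shift helpers follow the same pattern: for counts 0-31 the halves are combined with a double shift (shld/shrd); for counts 32-63 the halves are swapped first, because the hardware shift masks the count to 5 bits. Hedged one-line C++ equivalents over a single 64-bit value, assuming (as the helpers do) a count of 0-63 and two's-complement shift semantics (guaranteed since C++20):

#include <cstdint>

int64_t  LLshSketch(int64_t v, unsigned n)  { return v << n; }  // shift left
int64_t  LRshSketch(int64_t v, unsigned n)  { return v >> n; }  // arithmetic right
uint64_t LRszSketch(uint64_t v, unsigned n) { return v >> n; }  // logical right
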
+
+// *********************************************************************/
+// JIT_Dbl2LngP4x87
+//
+// Purpose:
+// converts a double to a long truncating toward zero (C semantics)
+//
+// uses stdcall calling conventions
+//
+// This code is faster on a P4 than the Dbl2Lng code above, but is
+// slower on a PIII. Hence we choose this code when on a P4 or above.
+//
+LEAF_ENTRY JIT_Dbl2LngP4x87, _TEXT
+ // get some local space
+ sub esp, 8
+
+ #define arg1 [esp + 0Ch]
+ fld QWORD PTR arg1 // fetch arg
+ fnstcw WORD PTR arg1 // store FPCW
+ movzx eax, WORD PTR arg1 // zero extend - wide
+ or ah, 0Ch // set the rounding-control bits to truncate (round toward zero)
+ mov DWORD PTR [esp], eax // store new FPCW bits
+ fldcw WORD PTR [esp] // reload FPCW with new bits
+ fistp QWORD PTR [esp] // convert
+
+ // reload FP result
+ mov eax, DWORD PTR [esp]
+ mov edx, DWORD PTR [esp + 4]
+
+ // reload original FPCW value
+ fldcw WORD PTR arg1
+ #undef arg1
+
+ // restore stack
+ add esp, 8
+
+ ret 8
+LEAF_END JIT_Dbl2LngP4x87, _TEXT
+
+// *********************************************************************/
+// JIT_Dbl2LngSSE3
+//
+// Purpose:
+// converts a double to a long truncating toward zero (C semantics)
+//
+// uses stdcall calling conventions
+//
+// This code is faster than the P4 x87 code above on Intel processors
+// Core2/Atom and later, which have SSE3 support
+//
+LEAF_ENTRY JIT_Dbl2LngSSE3, _TEXT
+ // get some local space
+ sub esp, 8
+
+ fld QWORD PTR [esp + 0Ch] // fetch arg
+ fisttp QWORD PTR [esp] // convert
+ mov eax, DWORD PTR [esp] // reload FP result
+ mov edx, DWORD PTR [esp + 4]
+
+ // restore stack
+ add esp, 8
+
+ ret 8
+LEAF_END JIT_Dbl2LngSSE3, _TEXT
+
+// *********************************************************************/
+// JIT_Dbl2IntSSE2
+//
+// Purpose:
+// converts a double to a long truncating toward zero (C semantics)
+//
+// uses stdcall calling conventions
+//
+// This code is even faster than the P4 x87 code in JIT_Dbl2LngP4x87,
+// but only returns a 32 bit value (only good for int).
+//
+LEAF_ENTRY JIT_Dbl2IntSSE2, _TEXT
+ movsd xmm0, [esp + 4]
+ cvttsd2si eax, xmm0
+ ret 8
+LEAF_END JIT_Dbl2IntSSE2, _TEXT
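
[Editor's note] All three conversion stubs implement C cast semantics (truncate toward zero) and differ only in which instruction sequence is fastest on a given CPU. A hedged portable equivalent, with the caveat that out-of-range input is undefined behavior in C++, whereas the x87/SSE paths produce the hardware's "integer indefinite" value:

#include <cstdint>

int64_t Dbl2LngSketch(double d) { return static_cast<int64_t>(d); } // like fisttp
int32_t Dbl2IntSketch(double d) { return static_cast<int32_t>(d); } // like cvttsd2si
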
+
+// *********************************************************************/
+// This is the small write barrier thunk we use when we know the
+// ephemeral generation is higher in memory than older generations.
+// The 0F0F0F0F0h placeholder values are bashed (patched) at runtime.
+// This is the generic version - wherever the code says ECX,
+// the specific register is patched later into a copy
+// Note: do not replace ECX by EAX - there is a smaller encoding for
+// the compares just for EAX, which won't work for other registers.
+//
+// READ THIS!!!!!!
+// it is imperative that the addresses of the values that we overwrite
+// (card table, ephemeral region ranges, etc) are naturally aligned since
+// there are codepaths that will overwrite these values while the EE is running.
+//
+LEAF_ENTRY JIT_WriteBarrierReg_PreGrow, _TEXT
+ mov DWORD PTR [edx], ecx
+ cmp ecx, 0F0F0F0F0h
+ jb LOCAL_LABEL(NoWriteBarrierPre)
+
+ shr edx, 10
+ nop // padding for alignment of constant
+ cmp BYTE PTR [edx + 0F0F0F0F0h], 0FFh
+ jne LOCAL_LABEL(WriteBarrierPre)
+
+LOCAL_LABEL(NoWriteBarrierPre):
+ ret
+ nop // padding for alignment of constant
+ nop // padding for alignment of constant
+
+LOCAL_LABEL(WriteBarrierPre):
+ mov BYTE PTR [edx+0F0F0F0F0h], 0FFh
+ ret
+LEAF_END JIT_WriteBarrierReg_PreGrow, _TEXT
+
+// *********************************************************************/
+// This is the larger write barrier thunk we use when we know that older
+// generations may be higher in memory than the ephemeral generation
+// The 0F0F0F0F0h placeholder values are bashed (patched) at runtime.
+// This is the generic version - wherever the code says ECX,
+// the specific register is patched later into a copy
+// Note: do not replace ECX by EAX - there is a smaller encoding for
+// the compares just for EAX, which won't work for other registers.
+// NOTE: we need this aligned for our validation to work properly
+.align 4
+LEAF_ENTRY JIT_WriteBarrierReg_PostGrow, _TEXT
+ mov DWORD PTR [edx], ecx
+ cmp ecx, 0F0F0F0F0h
+ jb LOCAL_LABEL(NoWriteBarrierPost)
+ cmp ecx, 0F0F0F0F0h
+ jae LOCAL_LABEL(NoWriteBarrierPost)
+
+ shr edx, 10
+ nop // padding for alignment of constant
+ cmp BYTE PTR [edx + 0F0F0F0F0h], 0FFh
+ jne LOCAL_LABEL(WriteBarrierPost)
+
+LOCAL_LABEL(NoWriteBarrierPost):
+ ret
+ nop // padding for alignment of constant
+ nop // padding for alignment of constant
+
+LOCAL_LABEL(WriteBarrierPost):
+ mov BYTE PTR [edx + 0F0F0F0F0h], 0FFh
+ ret
+LEAF_END JIT_WriteBarrierReg_PostGrow,_TEXT
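
[Editor's note] Both thunks are assembled with 0F0F0F0F0h placeholder immediates; at runtime the barrier-update path copies a thunk over the per-register JIT_WriteBarrier* slots (see the PatchedWriteBarrierHelper macro below) and stomps the placeholders with the live ephemeral bounds and card-table address, hence the insistence above that the overwritten words stay naturally aligned. A hedged sketch of that stomping with a hypothetical helper name; this makes no claim about the runtime's real patching code:

#include <cstdint>
#include <cstring>

// Hypothetical illustration: find the next 0xF0F0F0F0 placeholder immediate
// in a copied thunk and overwrite it with a live value.
static bool StompPlaceholderSketch(uint8_t* code, size_t size, uint32_t liveValue)
{
    const uint32_t placeholder = 0xF0F0F0F0;
    for (size_t i = 0; i + sizeof(uint32_t) <= size; i++)
    {
        uint32_t imm;
        std::memcpy(&imm, code + i, sizeof(imm));
        if (imm == placeholder)
        {
            std::memcpy(code + i, &liveValue, sizeof(liveValue));
            return true; // caller stomps bounds/card-table in a known order
        }
    }
    return false;
}
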
+
+// PatchedCodeStart and PatchedCodeEnd are used to determine bounds of patched code.
+//
+
+LEAF_ENTRY JIT_PatchedCodeStart, _TEXT
+ ret
+LEAF_END JIT_PatchedCodeStart, _TEXT
+
+// **********************************************************************
+// Write barriers generated at runtime
+
+LEAF_ENTRY JIT_PatchedWriteBarrierStart, _TEXT
+ ret
+LEAF_END JIT_PatchedWriteBarrierStart, _TEXT
+
+.macro PatchedWriteBarrierHelper rg
+.align 8
+LEAF_ENTRY JIT_WriteBarrier\rg, _TEXT
+ // Just allocate space that will be filled in at runtime
+ .space 48, 0xCC // 48 bytes of int3 filler, overwritten at runtime
+LEAF_END JIT_WriteBarrier\rg, _TEXT
+
+.endm
+
+PatchedWriteBarrierHelper EAX
+PatchedWriteBarrierHelper EBX
+PatchedWriteBarrierHelper ECX
+PatchedWriteBarrierHelper ESI
+PatchedWriteBarrierHelper EDI
+PatchedWriteBarrierHelper EBP
+
+LEAF_ENTRY JIT_PatchedWriteBarrierLast, _TEXT
+ ret
+LEAF_END JIT_PatchedWriteBarrierLast, _TEXT
+
+LEAF_ENTRY JIT_PatchedCodeLast, _TEXT
+ ret
+LEAF_END JIT_PatchedCodeLast, _TEXT
+
+// This is the first function outside the "keep together range". Used by BBT scripts.
+LEAF_ENTRY JIT_PatchedCodeEnd, _TEXT
+ ret
+LEAF_END JIT_PatchedCodeEnd, _TEXT
+
+// Note that the debugger skips this entirely when doing SetIP,
+// since COMPlusCheckForAbort should always return 0. Excep.cpp:LeaveCatch
+// asserts that to be true. If this ends up doing more work, then the
+// debugger may need additional support.
+// void __stdcall JIT_EndCatch();
+NESTED_ENTRY JIT_EndCatch, _TEXT, NoHandler
+ // make temp storage for return address, and push the address of that
+ // as the last arg to COMPlusEndCatch
+ mov ecx, [esp]
+ push ecx
+ push esp
+
+ // push the rest of COMPlusEndCatch's args, right-to-left
+ push esi
+ push edi
+ push ebx
+ push ebp
+
+ // returns old esp value in eax, stores jump address
+ call C_FUNC(COMPlusEndCatch)
+ // now eax = new esp, [esp] = new eip
+
+ pop edx // edx = new eip
+ mov esp, eax // esp = new esp
+ jmp edx // eip = new eip
+NESTED_END JIT_EndCatch, _TEXT
diff --git a/src/vm/i386/jithelp.asm b/src/vm/i386/jithelp.asm
index ac767287ee..9d2f6b7589 100644
--- a/src/vm/i386/jithelp.asm
+++ b/src/vm/i386/jithelp.asm
@@ -92,6 +92,8 @@ EXTERN _TransparentProxyStub_CrossContext@0:PROC
EXTERN _InContextTPQuickDispatchAsmStub@0:PROC
endif
+EXTERN _COMPlusEndCatch@20:PROC
+
.686P
.XMM
; The following macro is needed because of a MASM issue with the
@@ -2571,4 +2573,32 @@ ChkCastInterfaceIsNullInst:
@JIT_ChkCastInterface@8 endp
+; Note that the debugger skips this entirely when doing SetIP,
+; since COMPlusCheckForAbort should always return 0. Excep.cpp:LeaveCatch
+; asserts that to be true. If this ends up doing more work, then the
+; debugger may need additional support.
+; void __stdcall JIT_EndCatch();
+JIT_EndCatch PROC stdcall public
+
+ ; make temp storage for return address, and push the address of that
+ ; as the last arg to COMPlusEndCatch
+ mov ecx, [esp]
+ push ecx;
+ push esp;
+
+ ; push the rest of COMPlusEndCatch's args, right-to-left
+ push esi
+ push edi
+ push ebx
+ push ebp
+
+ call _COMPlusEndCatch@20 ; returns old esp value in eax, stores jump address
+ ; now eax = new esp, [esp] = new eip
+
+ pop edx ; edx = new eip
+ mov esp, eax ; esp = new esp
+ jmp edx ; eip = new eip
+
+JIT_EndCatch ENDP
+
end
diff --git a/src/vm/i386/jitinterfacex86.cpp b/src/vm/i386/jitinterfacex86.cpp
index 949b115ce2..a80b5e6325 100644
--- a/src/vm/i386/jitinterfacex86.cpp
+++ b/src/vm/i386/jitinterfacex86.cpp
@@ -57,10 +57,10 @@ extern "C" void STDCALL WriteBarrierAssert(BYTE* ptr, Object* obj)
if (fVerifyHeap)
{
obj->Validate(FALSE);
- if(GCHeap::GetGCHeap()->IsHeapPointer(ptr))
+ if(GCHeapUtilities::GetGCHeap()->IsHeapPointer(ptr))
{
Object* pObj = *(Object**)ptr;
- _ASSERTE (pObj == NULL || GCHeap::GetGCHeap()->IsHeapPointer(pObj));
+ _ASSERTE (pObj == NULL || GCHeapUtilities::GetGCHeap()->IsHeapPointer(pObj));
}
}
else
@@ -72,6 +72,7 @@ extern "C" void STDCALL WriteBarrierAssert(BYTE* ptr, Object* obj)
#endif // _DEBUG
+#ifndef FEATURE_PAL
/****************************************************************************/
/* assigns 'val to 'array[idx], after doing all the proper checks */
@@ -330,7 +331,9 @@ extern "C" __declspec(naked) Object* F_CALL_CONV JIT_ChkCastClassSpecial(MethodT
jmp JITutil_ChkCastAny
}
}
+#endif // FEATURE_PAL
+#ifndef FEATURE_PAL
HCIMPL1_V(INT32, JIT_Dbl2IntOvf, double val)
{
FCALL_CONTRACT;
@@ -346,6 +349,7 @@ THROW:
FCThrow(kOverflowException);
}
HCIMPLEND
+#endif // FEATURE_PAL
FCDECL1(Object*, JIT_New, CORINFO_CLASS_HANDLE typeHnd_);
@@ -610,7 +614,7 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
if (flags & (ALIGN8 | SIZE_IN_EAX | ALIGN8OBJ))
{
// MOV EBX, [edx]Thread.m_alloc_context.alloc_ptr
- psl->X86EmitOffsetModRM(0x8B, kEBX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ psl->X86EmitOffsetModRM(0x8B, kEBX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr));
// add EAX, EBX
psl->Emit16(0xC303);
if (flags & ALIGN8)
@@ -619,11 +623,11 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
else
{
// add eax, [edx]Thread.m_alloc_context.alloc_ptr
- psl->X86EmitOffsetModRM(0x03, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr));
+ psl->X86EmitOffsetModRM(0x03, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr));
}
// cmp eax, [edx]Thread.m_alloc_context.alloc_limit
- psl->X86EmitOffsetModRM(0x3b, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_limit));
+ psl->X86EmitOffsetModRM(0x3b, kEAX, kEDX, offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_limit));
// ja noAlloc
psl->X86EmitCondJump(noAlloc, X86CondCode::kJA);
@@ -631,7 +635,7 @@ void JIT_TrialAlloc::EmitCore(CPUSTUBLINKER *psl, CodeLabel *noLock, CodeLabel *
// Fill in the allocation and get out.
// mov [edx]Thread.m_alloc_context.alloc_ptr, eax
- psl->X86EmitIndexRegStore(kEDX, offsetof(Thread, m_alloc_context) + offsetof(alloc_context, alloc_ptr), kEAX);
+ psl->X86EmitIndexRegStore(kEDX, offsetof(Thread, m_alloc_context) + offsetof(gc_alloc_context, alloc_ptr), kEAX);
if (flags & (ALIGN8 | SIZE_IN_EAX | ALIGN8OBJ))
{
@@ -1502,7 +1506,7 @@ void InitJITHelpers1()
_ASSERTE(g_SystemInfo.dwNumberOfProcessors != 0);
- JIT_TrialAlloc::Flags flags = GCHeap::UseAllocationContexts() ?
+ JIT_TrialAlloc::Flags flags = GCHeapUtilities::UseAllocationContexts() ?
JIT_TrialAlloc::MP_ALLOCATOR : JIT_TrialAlloc::NORMAL;
// Get CPU features and check for SSE2 support.
diff --git a/src/vm/i386/stublinkerx86.cpp b/src/vm/i386/stublinkerx86.cpp
index 0037a7d3e6..63b9e87367 100644
--- a/src/vm/i386/stublinkerx86.cpp
+++ b/src/vm/i386/stublinkerx86.cpp
@@ -64,6 +64,7 @@ extern "C" HRESULT __cdecl StubRareDisableHR(Thread *pThread);
#endif // FEATURE_COMINTEROP
extern "C" VOID __cdecl StubRareDisableTHROW(Thread *pThread, Frame *pFrame);
+#ifndef FEATURE_ARRAYSTUB_AS_IL
extern "C" VOID __cdecl ArrayOpStubNullException(void);
extern "C" VOID __cdecl ArrayOpStubRangeException(void);
extern "C" VOID __cdecl ArrayOpStubTypeMismatchException(void);
@@ -78,10 +79,13 @@ EXCEPTION_HELPERS(ArrayOpStubNullException);
EXCEPTION_HELPERS(ArrayOpStubRangeException);
EXCEPTION_HELPERS(ArrayOpStubTypeMismatchException);
#undef EXCEPTION_HELPERS
+#endif // !_TARGET_AMD64_
+#endif // !FEATURE_ARRAYSTUB_AS_IL
-#if defined(_DEBUG)
+#if defined(_TARGET_AMD64_)
+#if defined(_DEBUG)
extern "C" VOID __cdecl DebugCheckStubUnwindInfo();
-#endif
+#endif // _DEBUG
#endif // _TARGET_AMD64_
// Presumably this code knows what it is doing with TLS. If we are hiding these
@@ -2535,7 +2539,7 @@ VOID StubLinkerCPU::X86EmitCurrentAppDomainFetch(X86Reg dstreg, unsigned preserv
#endif // FEATURE_IMPLICIT_TLS
}
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_)
#ifdef PROFILING_SUPPORTED
VOID StubLinkerCPU::EmitProfilerComCallProlog(TADDR pFrameVptr, X86Reg regFrame)
@@ -2624,6 +2628,7 @@ VOID StubLinkerCPU::EmitProfilerComCallEpilog(TADDR pFrameVptr, X86Reg regFrame)
#endif // PROFILING_SUPPORTED
+#ifndef FEATURE_STUBS_AS_IL
//========================================================================
// Prolog for entering managed code from COM
// pushes the appropriate frame ptr
@@ -2850,6 +2855,7 @@ void StubLinkerCPU::EmitComMethodStubEpilog(TADDR pFrameVptr,
EmitLabel(rgRareLabels[0]); // label for rare setup thread
EmitRareSetup(rgRejoinLabels[0], /*fThrow*/ TRUE); // emit rare setup thread
}
+#endif // !FEATURE_STUBS_AS_IL
//---------------------------------------------------------------
// Emit code to store the setup current Thread structure in eax.
@@ -2882,6 +2888,7 @@ VOID StubLinkerCPU::EmitSetup(CodeLabel *pForwardRef)
switch (mode)
{
case TLSACCESS_WNT:
+#ifndef FEATURE_PAL
{
unsigned __int32 tlsofs = offsetof(TEB, TlsSlots) + (idx * sizeof(void*));
@@ -2889,6 +2896,9 @@ VOID StubLinkerCPU::EmitSetup(CodeLabel *pForwardRef)
EmitBytes(code, sizeof(code));
Emit32(tlsofs);
}
+#else // !FEATURE_PAL
+ _ASSERTE("TLSACCESS_WNT mode is not supported");
+#endif // !FEATURE_PAL
break;
case TLSACCESS_GENERIC:
@@ -2919,7 +2929,6 @@ VOID StubLinkerCPU::EmitSetup(CodeLabel *pForwardRef)
X86EmitDebugTrashReg(kECX);
X86EmitDebugTrashReg(kEDX);
#endif
-
}
VOID StubLinkerCPU::EmitRareSetup(CodeLabel *pRejoinPoint, BOOL fThrow)
@@ -4824,8 +4833,9 @@ VOID StubLinkerCPU::EmitSecureDelegateInvoke(UINT_PTR hash)
// Epilog
EmitMethodStubEpilog(numStackBytes, SecureDelegateFrame::GetOffsetOfTransitionBlock());
}
+#endif // !CROSSGEN_COMPILE && !FEATURE_STUBS_AS_IL
-#ifndef FEATURE_ARRAYSTUB_AS_IL
+#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_ARRAYSTUB_AS_IL)
// Little helper to generate code to move nbytes bytes of non Ref memory
@@ -5768,8 +5778,9 @@ COPY_VALUE_CLASS:
#pragma warning(pop)
#endif
-#endif // FEATURE_ARRAYSTUB_AS_IL
+#endif // !CROSSGEN_COMPILE && !FEATURE_ARRAYSTUB_AS_IL
+#if !defined(CROSSGEN_COMPILE) && !defined(FEATURE_STUBS_AS_IL)
//===========================================================================
// Emits code to break into debugger
VOID StubLinkerCPU::EmitDebugBreak()
@@ -5841,9 +5852,9 @@ Thread* __stdcall CreateThreadBlockReturnHr(ComMethodFrame *pFrame)
#pragma warning(pop)
#endif
-#endif // defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
+#endif // FEATURE_COMINTEROP && _TARGET_X86_
-#endif // !defined(CROSSGEN_COMPILE) && !defined(FEATURE_STUBS_AS_IL)
+#endif // !CROSSGEN_COMPILE && !FEATURE_STUBS_AS_IL
#endif // !DACCESS_COMPILE
diff --git a/src/vm/i386/stublinkerx86.h b/src/vm/i386/stublinkerx86.h
index 237fc794d4..e361833a1e 100644
--- a/src/vm/i386/stublinkerx86.h
+++ b/src/vm/i386/stublinkerx86.h
@@ -345,6 +345,11 @@ class StubLinkerCPU : public StubLinker
VOID EmitSetup(CodeLabel *pForwardRef);
VOID EmitRareSetup(CodeLabel* pRejoinPoint, BOOL fThrow);
+
+#ifndef FEATURE_STUBS_AS_IL
+ VOID EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOffset);
+ VOID EmitMethodStubEpilog(WORD numArgBytes, int transitionBlockOffset);
+
VOID EmitCheckGSCookie(X86Reg frameReg, int gsCookieOffset);
#ifdef _TARGET_X86_
@@ -353,10 +358,8 @@ class StubLinkerCPU : public StubLinker
void EmitComMethodStubEpilog(TADDR pFrameVptr, CodeLabel** rgRareLabels,
CodeLabel** rgRejoinLabels, BOOL bShouldProfile);
-#endif
-
- VOID EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOffset);
- VOID EmitMethodStubEpilog(WORD numArgBytes, int transitionBlockOffset);
+#endif // _TARGET_X86_
+#endif // !FEATURE_STUBS_AS_IL
VOID EmitUnboxMethodStub(MethodDesc* pRealMD);
#if defined(FEATURE_SHARE_GENERIC_CODE)
@@ -374,13 +377,16 @@ class StubLinkerCPU : public StubLinker
BOOL bShouldProfile);
#endif // FEATURE_COMINTEROP && _TARGET_X86_
+#ifndef FEATURE_STUBS_AS_IL
//===========================================================================
// Computes hash code for MulticastDelegate.Invoke()
static UINT_PTR HashMulticastInvoke(MetaSig* pSig);
+#ifdef _TARGET_X86_
//===========================================================================
// Emits code for Delegate.Invoke() any delegate type
VOID EmitDelegateInvoke();
+#endif // _TARGET_X86_
//===========================================================================
// Emits code for MulticastDelegate.Invoke() - sig specific
@@ -389,22 +395,27 @@ class StubLinkerCPU : public StubLinker
//===========================================================================
// Emits code for Delegate.Invoke() on delegates that recorded creator assembly
VOID EmitSecureDelegateInvoke(UINT_PTR hash);
+#endif // !FEATURE_STUBS_AS_IL
//===========================================================================
// Emits code to adjust for a static delegate target.
VOID EmitShuffleThunk(struct ShuffleEntry *pShuffleEntryArray);
+#ifndef FEATURE_ARRAYSTUB_AS_IL
//===========================================================================
// Emits code to do an array operation.
VOID EmitArrayOpStub(const ArrayOpScript*);
//Worker function to emit throw helpers for array ops.
VOID EmitArrayOpStubThrow(unsigned exConst, unsigned cbRetArg);
+#endif
+#ifndef FEATURE_STUBS_AS_IL
//===========================================================================
// Emits code to break into debugger
VOID EmitDebugBreak();
+#endif // !FEATURE_STUBS_AS_IL
#if defined(_DEBUG) && (defined(_TARGET_AMD64_) || defined(_TARGET_X86_)) && !defined(FEATURE_PAL)
//===========================================================================
diff --git a/src/vm/i386/umthunkstub.S b/src/vm/i386/umthunkstub.S
new file mode 100644
index 0000000000..728964bdb6
--- /dev/null
+++ b/src/vm/i386/umthunkstub.S
@@ -0,0 +1,177 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+.intel_syntax noprefix
+#include "unixasmmacros.inc"
+#include "asmconstants.h"
+
+//
+// eax = UMEntryThunk*
+//
+NESTED_ENTRY TheUMEntryPrestub, _TEXT, UnhandledExceptionHandlerUnix
+ // Preserve argument registers
+ push ecx
+ push edx
+
+ push eax // UMEntryThunk*
+ call C_FUNC(TheUMEntryPrestubWorker)
+ pop edx
+ // eax = PCODE
+
+ // Restore argument registers
+ pop edx
+ pop ecx
+
+ jmp eax // Tail Jmp
+NESTED_END TheUMEntryPrestub, _TEXT
+
+//
+// eax: UMEntryThunk*
+//
+NESTED_ENTRY UMThunkStub, _TEXT, UnhandledExceptionHandlerUnix
+
+#define UMThunkStub_SAVEDREG (3*4) // ebx, esi, edi
+#define UMThunkStub_LOCALVARS (2*4) // UMEntryThunk*, Thread*
+#define UMThunkStub_INT_ARG_SPILL (2*4) // for saving ecx, edx
+#define UMThunkStub_UMENTRYTHUNK_OFFSET (UMThunkStub_SAVEDREG+4)
+#define UMThunkStub_THREAD_OFFSET (UMThunkStub_UMENTRYTHUNK_OFFSET+4)
+#define UMThunkStub_INT_ARG_OFFSET (UMThunkStub_THREAD_OFFSET+4)
+#define UMThunkStub_FIXEDALLOCSIZE (UMThunkStub_LOCALVARS+UMThunkStub_INT_ARG_SPILL)
+
+// return address <-- entry ESP
+// saved ebp <-- EBP
+// saved ebx
+// saved esi
+// saved edi
+// UMEntryThunk*
+// Thread*
+// saved ecx
+// saved edx
+// {optional stack args passed to callee} <-- new esp
+
+ PROLOG_BEG
+ PROLOG_PUSH ebx
+ PROLOG_PUSH esi
+ PROLOG_PUSH edi
+ PROLOG_END
+ sub esp, UMThunkStub_FIXEDALLOCSIZE
+
+ mov dword ptr [ebp - UMThunkStub_INT_ARG_OFFSET], ecx
+ mov dword ptr [ebp - UMThunkStub_INT_ARG_OFFSET - 0x04], edx
+
+ mov dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET], eax
+
+ call C_FUNC(GetThread)
+ test eax, eax
+ jz LOCAL_LABEL(DoThreadSetup)
+
+LOCAL_LABEL(HaveThread):
+
+ mov dword ptr [ebp - UMThunkStub_THREAD_OFFSET], eax
+
+ // FailFast if a native callable method is invoked via ldftn and calli.
+ cmp dword ptr [eax + Thread_m_fPreemptiveGCDisabled], 1
+ jz LOCAL_LABEL(InvalidTransition)
+
+ // disable preemptive GC
+ mov dword ptr [eax + Thread_m_fPreemptiveGCDisabled], 1
+
+ // catch returning thread here if a GC is in progress
+ PREPARE_EXTERNAL_VAR g_TrapReturningThreads, eax
+ cmp dword ptr [eax], 0 // compare the flag's value, not its address
+ jnz LOCAL_LABEL(DoTrapReturningThreadsTHROW)
+
+LOCAL_LABEL(InCooperativeMode):
+
+#if _DEBUG
+ mov eax, dword ptr [ebp - UMThunkStub_THREAD_OFFSET]
+ mov eax, dword ptr [eax + Thread__m_pDomain]
+ mov esi, dword ptr [eax + AppDomain__m_dwId]
+
+ mov eax, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
+ mov edi, dword ptr [eax + UMEntryThunk__m_dwDomainId]
+
+ cmp esi, edi
+ jne LOCAL_LABEL(WrongAppDomain)
+#endif
+
+ mov eax, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
+ mov ebx, dword ptr [eax + UMEntryThunk__m_pUMThunkMarshInfo]
+ mov eax, dword ptr [ebx + UMThunkMarshInfo__m_cbActualArgSize]
+ test eax, eax
+ jnz LOCAL_LABEL(UMThunkStub_CopyStackArgs)
+
+LOCAL_LABEL(UMThunkStub_ArgumentsSetup):
+
+ mov ecx, dword ptr [ebp - UMThunkStub_INT_ARG_OFFSET]
+ mov edx, dword ptr [ebp - UMThunkStub_INT_ARG_OFFSET - 0x04]
+
+ mov eax, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
+ mov ebx, dword ptr [eax + UMEntryThunk__m_pUMThunkMarshInfo]
+ mov ebx, dword ptr [ebx + UMThunkMarshInfo__m_pILStub]
+
+ call ebx
+
+LOCAL_LABEL(PostCall):
+
+ mov ebx, dword ptr [ebp - UMThunkStub_THREAD_OFFSET]
+ mov dword ptr [ebx + Thread_m_fPreemptiveGCDisabled], 0
+
+ lea esp, [ebp - UMThunkStub_SAVEDREG] // deallocate arguments
+ EPILOG_BEG
+ EPILOG_POP edi
+ EPILOG_POP esi
+ EPILOG_POP ebx
+ EPILOG_END
+ ret
+
+LOCAL_LABEL(DoThreadSetup):
+
+ call C_FUNC(CreateThreadBlockThrow)
+ jmp LOCAL_LABEL(HaveThread)
+
+LOCAL_LABEL(InvalidTransition):
+
+ // No arguments to set up; ReversePInvokeBadTransition will failfast
+ call C_FUNC(ReversePInvokeBadTransition)
+
+LOCAL_LABEL(DoTrapReturningThreadsTHROW):
+
+ // extern "C" VOID STDCALL UMThunkStubRareDisableWorker(Thread *pThread, UMEntryThunk *pUMEntryThunk)
+ mov eax, dword ptr [ebp - UMThunkStub_UMENTRYTHUNK_OFFSET]
+ push eax
+ mov eax, dword ptr [ebp - UMThunkStub_THREAD_OFFSET]
+ push eax
+ call C_FUNC(UMThunkStubRareDisableWorker)
+
+ jmp LOCAL_LABEL(InCooperativeMode)
+
+LOCAL_LABEL(UMThunkStub_CopyStackArgs):
+
+ // eax = m_cbActualArgSize
+ sub esp, eax
+ and esp, -16 // align to a 16-byte boundary
+ lea esi, [ebp + 0x08]
+ lea edi, [esp]
+
+LOCAL_LABEL(CopyLoop):
+
+ // eax = number of bytes
+ // esi = src
+ // edi = dest
+ // edx = scratch
+
+ add eax, -4
+ mov edx, dword ptr [esi + eax]
+ mov dword ptr [edi + eax], edx
+ jnz LOCAL_LABEL(CopyLoop)
+
+ jmp LOCAL_LABEL(UMThunkStub_ArgumentsSetup)
+
+#if _DEBUG
+LOCAL_LABEL(WrongAppDomain):
+ int3
+#endif
+
+NESTED_END UMThunkStub, _TEXT
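
[Editor's note] Stripped of the stack bookkeeping, UMThunkStub is the standard reverse-P/Invoke sequence. The sketch below compiles on its own; every type and helper is a simplified stand-in, and only the order of steps is taken from the asm above.

#include <cstdlib>

struct UMEntryThunkSketch { void (*ilStub)(); };   // stand-in for UMEntryThunk
struct ThreadSketch { int preemptiveGCDisabled; };

static ThreadSketch  g_threadSketch;
static ThreadSketch* GetThreadSketch()        { return &g_threadSketch; }
static ThreadSketch* CreateThreadBlockSketch() { return &g_threadSketch; }
static int g_trapReturningThreadsSketch;
static void RareDisableWorkerSketch(ThreadSketch*, UMEntryThunkSketch*) {}

void UMThunkStubFlowSketch(UMEntryThunkSketch* entry) // entry arrives in EAX
{
    ThreadSketch* thread = GetThreadSketch();
    if (!thread)
        thread = CreateThreadBlockSketch();   // DoThreadSetup path

    if (thread->preemptiveGCDisabled)         // native-callable method invoked
        std::abort();                         // via ldftn+calli: failfast

    thread->preemptiveGCDisabled = 1;         // enter cooperative mode
    if (g_trapReturningThreadsSketch)         // a GC wants to suspend us
        RareDisableWorkerSketch(thread, entry);

    entry->ilStub();                          // copy stack args, load ecx/edx,
                                              // then call the IL marshaling stub
    thread->preemptiveGCDisabled = 0;         // back to preemptive mode
}
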
diff --git a/src/vm/i386/unixstubs.cpp b/src/vm/i386/unixstubs.cpp
new file mode 100644
index 0000000000..9fe7127946
--- /dev/null
+++ b/src/vm/i386/unixstubs.cpp
@@ -0,0 +1,106 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+#include "common.h"
+
+extern "C"
+{
+ void ThrowControlForThread()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void NakedThrowHelper()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void PInvokeStubForHost()
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void PInvokeStubForHostInner(DWORD dwStackSize, LPVOID pStackFrame, LPVOID pTarget)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void ProfileEnterNaked(FunctionIDOrClientID functionIDOrClientID)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void ProfileLeaveNaked(FunctionIDOrClientID functionIDOrClientID)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void ProfileTailcallNaked(FunctionIDOrClientID functionIDOrClientID)
+ {
+ PORTABILITY_ASSERT("Implement for PAL");
+ }
+
+ void STDCALL JIT_ProfilerEnterLeaveTailcallStub(UINT_PTR ProfilerHandle)
+ {
+ }
+
+ _Unwind_Reason_Code
+ UnhandledExceptionHandlerUnix(
+ IN int version,
+ IN _Unwind_Action action,
+ IN uint64_t exceptionClass,
+ IN struct _Unwind_Exception *exception,
+ IN struct _Unwind_Context *context
+ )
+ {
+ PORTABILITY_ASSERT("UnhandledExceptionHandlerUnix");
+ return _URC_FATAL_PHASE1_ERROR;
+ }
+
+ BOOL CallRtlUnwind()
+ {
+ PORTABILITY_ASSERT("CallRtlUnwind");
+ return FALSE;
+ }
+};
+
+VOID __cdecl PopSEHRecords(LPVOID pTargetSP)
+{
+ PORTABILITY_ASSERT("Implement for PAL");
+}
+
+EXTERN_C VOID SinglecastDelegateInvokeStub()
+{
+ PORTABILITY_ASSERT("SinglecastDelegateInvokeStub");
+}
+
+EXTERN_C VOID ResolveWorkerChainLookupAsmStub()
+{
+ PORTABILITY_ASSERT("ResolveWorkerChainLookupAsmStub");
+}
+
+EXTERN_C VOID BackPatchWorkerAsmStub()
+{
+ PORTABILITY_ASSERT("BackPatchWorkerAsmStub");
+}
+
+EXTERN_C VOID JIT_TailCall()
+{
+ PORTABILITY_ASSERT("JIT_TailCall");
+}
+
+EXTERN_C VOID JIT_TailCallReturnFromVSD()
+{
+ PORTABILITY_ASSERT("JIT_TailCallReturnFromVSD");
+}
+
+EXTERN_C VOID JIT_TailCallVSDLeave()
+{
+ PORTABILITY_ASSERT("JIT_TailCallVSDLeave");
+}
+
+EXTERN_C VOID JIT_TailCallLeave()
+{
+ PORTABILITY_ASSERT("JIT_TailCallLeave");
+}
diff --git a/src/vm/i386/virtualcallstubcpu.hpp b/src/vm/i386/virtualcallstubcpu.hpp
index 33ce8199b9..8c16854d22 100644
--- a/src/vm/i386/virtualcallstubcpu.hpp
+++ b/src/vm/i386/virtualcallstubcpu.hpp
@@ -695,7 +695,7 @@ BOOL isDelegateCall(BYTE *interiorPtr)
{
LIMITED_METHOD_CONTRACT;
- if (GCHeap::GetGCHeap()->IsHeapPointer((void*)interiorPtr))
+ if (GCHeapUtilities::GetGCHeap()->IsHeapPointer((void*)interiorPtr))
{
Object *delegate = (Object*)(interiorPtr - DelegateObject::GetOffsetOfMethodPtrAux());
VALIDATEOBJECTREF(ObjectToOBJECTREF(delegate));
diff --git a/src/vm/ilmarshalers.cpp b/src/vm/ilmarshalers.cpp
index 114fbe3ccb..c44e561df3 100644
--- a/src/vm/ilmarshalers.cpp
+++ b/src/vm/ilmarshalers.cpp
@@ -4177,7 +4177,7 @@ void ILNativeArrayMarshaler::EmitConvertSpaceNativeToCLR(ILCodeStream* pslILEmit
if (IsByref(m_dwMarshalFlags))
{
//
- // Reset the element count just in case there is a exception thrown in the code emitted by
+ // Reset the element count just in case there is an exception thrown in the code emitted by
// EmitLoadElementCount. The best thing we can do here is to avoid a crash.
//
_ASSERTE(m_dwSavedSizeArg != LOCAL_NUM_UNUSED);
diff --git a/src/vm/ilstubcache.cpp b/src/vm/ilstubcache.cpp
index 4343ba819f..9cd904aec7 100644
--- a/src/vm/ilstubcache.cpp
+++ b/src/vm/ilstubcache.cpp
@@ -128,7 +128,7 @@ MethodDesc* ILStubCache::CreateAndLinkNewILStubMethodDesc(LoaderAllocator* pAllo
pStubLinker->GenerateCode(pbBuffer, cbCode);
pStubLinker->GetLocalSig(pbLocalSig, cbSig);
- pResolver->SetJitFlags(CORJIT_FLG_IL_STUB);
+ pResolver->SetJitFlags(CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_IL_STUB));
}
pResolver->SetTokenLookupMap(pStubLinker->GetTokenLookupMap());
diff --git a/src/vm/ilstubresolver.cpp b/src/vm/ilstubresolver.cpp
index 64ff99f67e..5ba6c8a3b0 100644
--- a/src/vm/ilstubresolver.cpp
+++ b/src/vm/ilstubresolver.cpp
@@ -299,7 +299,7 @@ ILStubResolver::ILStubResolver() :
m_pStubMD(dac_cast<PTR_MethodDesc>(nullptr)),
m_pStubTargetMD(dac_cast<PTR_MethodDesc>(nullptr)),
m_type(Unassigned),
- m_dwJitFlags(0)
+ m_jitFlags()
{
LIMITED_METHOD_CONTRACT;
@@ -488,16 +488,16 @@ bool ILStubResolver::IsILGenerated()
return (dac_cast<TADDR>(m_pCompileTimeState) != ILNotYetGenerated);
}
-void ILStubResolver::SetJitFlags(DWORD dwFlags)
+void ILStubResolver::SetJitFlags(CORJIT_FLAGS jitFlags)
{
LIMITED_METHOD_CONTRACT;
- m_dwJitFlags = dwFlags;
+ m_jitFlags = jitFlags;
}
-DWORD ILStubResolver::GetJitFlags()
+CORJIT_FLAGS ILStubResolver::GetJitFlags()
{
LIMITED_METHOD_CONTRACT;
- return m_dwJitFlags;
+ return m_jitFlags;
}
// static
diff --git a/src/vm/ilstubresolver.h b/src/vm/ilstubresolver.h
index b100931107..47181c8a94 100644
--- a/src/vm/ilstubresolver.h
+++ b/src/vm/ilstubresolver.h
@@ -64,8 +64,8 @@ public:
void SetTokenLookupMap(TokenLookupMap* pMap);
- void SetJitFlags(DWORD dwJitFlags);
- DWORD GetJitFlags();
+ void SetJitFlags(CORJIT_FLAGS jitFlags);
+ CORJIT_FLAGS GetJitFlags();
static void StubGenFailed(ILStubResolver* pResolver);
@@ -116,7 +116,7 @@ protected:
PTR_MethodDesc m_pStubMD;
PTR_MethodDesc m_pStubTargetMD;
ILStubType m_type;
- DWORD m_dwJitFlags;
+ CORJIT_FLAGS m_jitFlags;
};
typedef Holder<ILStubResolver*, DoNothing<ILStubResolver*>, ILStubResolver::StubGenFailed, NULL> ILStubGenHolder;
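
[Editor's note] These hunks replace the raw DWORD of JIT flag bits with a CORJIT_FLAGS object, so flag sets compose through a typed API (see jitFlags.Add(...) in the interpreter.cpp hunk below) instead of untyped bitwise OR. A hedged sketch of the shape of such a wrapper, with made-up flag values; the real class and its enumerators live in the JIT-EE interface headers:

#include <cstdint>

class JitFlagsSketch
{
    uint64_t m_bits = 0;
public:
    enum Flag { FLAG_IL_STUB = 1, FLAG_MAKEFINALCODE = 2 }; // values are made up
    JitFlagsSketch() = default;
    explicit JitFlagsSketch(Flag f)   { Set(f); }
    void Set(Flag f)                  { m_bits |= 1ull << f; }
    bool IsSet(Flag f) const          { return (m_bits & (1ull << f)) != 0; }
    void Add(const JitFlagsSketch& o) { m_bits |= o.m_bits; } // union of two sets
};
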
diff --git a/src/vm/interoputil.cpp b/src/vm/interoputil.cpp
index 33f04b9ab8..6a0fbded12 100644
--- a/src/vm/interoputil.cpp
+++ b/src/vm/interoputil.cpp
@@ -2130,7 +2130,7 @@ void MinorCleanupSyncBlockComData(InteropSyncBlockInfo* pInteropInfo)
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION( GCHeap::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ) );
+ PRECONDITION( GCHeapUtilities::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ) );
}
CONTRACTL_END;
diff --git a/src/vm/interpreter.cpp b/src/vm/interpreter.cpp
index a540cff0b0..b74672f4f0 100644
--- a/src/vm/interpreter.cpp
+++ b/src/vm/interpreter.cpp
@@ -14,7 +14,7 @@
#include "openum.h"
#include "fcall.h"
#include "frames.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include <float.h>
#include "jitinterface.h"
#include "safemath.h"
@@ -903,9 +903,10 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
#endif
{
// But we also have to use r4, because ThumbEmitCondRegJump below requires a low register.
+ sl.ThumbEmitMovConstant(r11, 0);
sl.ThumbEmitMovConstant(r12, UINT_PTR(interpMethInfo));
sl.ThumbEmitLoadRegIndirect(r12, r12, offsetof(InterpreterMethodInfo, m_jittedCode));
- sl.ThumbEmitCmpImm(r12, 0); // Set condition codes.
+ sl.ThumbEmitCmpReg(r12, r11); // Set condition codes.
// If r12 is zero, then go on to do the interpretation.
CodeLabel* doInterpret = sl.NewCodeLabel();
sl.ThumbEmitCondFlagJump(doInterpret, thumbCondEq.cond);
@@ -1578,7 +1579,7 @@ CorJitResult Interpreter::GenerateInterpreterStub(CEEInfo* comp,
#else
#error unsupported platform
#endif
- stub = sl.Link();
+ stub = sl.Link(SystemDomain::GetGlobalLoaderAllocator()->GetStubHeap());
*nativeSizeOfCode = static_cast<ULONG>(stub->GetNumCodeBytes());
// TODO: manage reference count of interpreter stubs. Look for examples...
@@ -1736,13 +1737,13 @@ void Interpreter::JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo,
fprintf(GetLogFile(), "JITting method %s:%s.\n", md->m_pszDebugClassName, md->m_pszDebugMethodName);
}
#endif // _DEBUG
- DWORD dwFlags = CORJIT_FLG_MAKEFINALCODE;
+ CORJIT_FLAGS jitFlags(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
NewHolder<COR_ILMETHOD_DECODER> pDecoder(NULL);
// Dynamic methods (e.g., IL stubs) do not have an IL decoder but may
// require additional flags. Ordinary methods require the opposite.
if (md->IsDynamicMethod())
{
- dwFlags |= md->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags();
+ jitFlags.Add(md->AsDynamicMethodDesc()->GetILStubResolver()->GetJitFlags());
}
else
{
@@ -1751,7 +1752,7 @@ void Interpreter::JitMethodIfAppropriate(InterpreterMethodInfo* interpMethInfo,
md->GetMDImport(),
&status);
}
- PCODE res = md->MakeJitWorker(pDecoder, dwFlags, 0);
+ PCODE res = md->MakeJitWorker(pDecoder, jitFlags);
interpMethInfo->m_jittedCode = res;
}
}
@@ -8607,6 +8608,8 @@ void Interpreter::BoxStructRefAt(unsigned ind, CORINFO_CLASS_HANDLE valCls)
if (th.IsTypeDesc())
COMPlusThrow(kInvalidOperationException,W("InvalidOperation_TypeCannotBeBoxed"));
+ MethodTable* pMT = th.AsMethodTable();
+
{
Object* res = OBJECTREFToObject(pMT->Box(valPtr));
@@ -9578,7 +9581,9 @@ void Interpreter::DoCallWork(bool virtualCall, void* thisArg, CORINFO_RESOLVED_T
// This is the argument slot that will be used to hold the return value.
ARG_SLOT retVal = 0;
+#ifndef _ARM_
_ASSERTE (NUMBER_RETURNVALUE_SLOTS == 1);
+#endif
// If the return type is a structure, then these will be initialized.
CORINFO_CLASS_HANDLE retTypeClsHnd = NULL;
@@ -10316,15 +10321,23 @@ void Interpreter::CallI()
}
else
{
- pMD = g_pPrepareConstrainedRegionsMethod; // A random static method.
+ pMD = g_pExecuteBackoutCodeHelperMethod; // A random static method.
}
MethodDescCallSite mdcs(pMD, &mSig, ftnPtr);
+#if 0
// If the current method being interpreted is an IL stub, we're calling native code, so
// change the GC mode. (We'll only do this at the call if the calling convention turns out
// to be a managed calling convention.)
MethodDesc* pStubContextMD = reinterpret_cast<MethodDesc*>(m_stubContext);
bool transitionToPreemptive = (pStubContextMD != NULL && !pStubContextMD->IsIL());
mdcs.CallTargetWorker(args, &retVal, sizeof(retVal), transitionToPreemptive);
+#else
+ // TODO: The code above triggers an assertion at threads.cpp:6861:
+ // _ASSERTE(thread->PreemptiveGCDisabled()); // Should have been in managed code
+ // The workaround will likely break more things than it fixes:
+ // just do not transition to preemptive GC for now.
+ mdcs.CallTargetWorker(args, &retVal, sizeof(retVal));
+#endif
}
// retVal is now vulnerable.
GCX_FORBID();
diff --git a/src/vm/interpreter.h b/src/vm/interpreter.h
index 92835be92e..1151b36913 100644
--- a/src/vm/interpreter.h
+++ b/src/vm/interpreter.h
@@ -718,7 +718,7 @@ class InterpreterCEEInfo: public CEEInfo
{
CEEJitInfo m_jitInfo;
public:
- InterpreterCEEInfo(CORINFO_METHOD_HANDLE meth): CEEInfo((MethodDesc*)meth), m_jitInfo((MethodDesc*)meth, NULL, NULL, CorJitFlag(0)) { m_pOverride = this; }
+ InterpreterCEEInfo(CORINFO_METHOD_HANDLE meth): CEEInfo((MethodDesc*)meth), m_jitInfo((MethodDesc*)meth, NULL, NULL, CORJIT_FLAGS::CORJIT_FLAG_SPEED_OPT) { m_pOverride = this; }
// Certain methods are unimplemented by CEEInfo (they hit an assert). They are implemented by CEEJitInfo, yet
// don't seem to require any of the CEEJitInfo state we can't provide. For those case, delegate to the "partial"
diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
index 1626810758..7b9389d5b6 100644
--- a/src/vm/jithelpers.cpp
+++ b/src/vm/jithelpers.cpp
@@ -23,7 +23,7 @@
#include "security.h"
#include "securitymeta.h"
#include "dllimport.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "comdelegate.h"
#include "jitperf.h" // to track jit perf
#include "corprof.h"
@@ -130,7 +130,7 @@ inline UINT64 ShiftToHi32Bits(UINT32 x)
return ret.QuadPart;
}
-#if !defined(_TARGET_X86_)
+#if !defined(_TARGET_X86_) || defined(FEATURE_PAL)
/*********************************************************************/
HCIMPL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2)
{
@@ -145,7 +145,7 @@ HCIMPL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2)
return (val1 * val2);
}
HCIMPLEND
-#endif // !defined(_TARGET_X86_)
+#endif // !_TARGET_X86_ || FEATURE_PAL
/*********************************************************************/
HCIMPL2_VV(INT64, JIT_LMulOvf, INT64 val1, INT64 val2)
@@ -513,7 +513,7 @@ HCIMPL1_V(double, JIT_ULng2Dbl, UINT64 val)
HCIMPLEND
/*********************************************************************/
-// needed for ARM
+// needed for ARM and RyuJIT-x86
HCIMPL1_V(double, JIT_Lng2Dbl, INT64 val)
{
FCALL_CONTRACT;
@@ -619,7 +619,7 @@ HCIMPL1_V(UINT64, JIT_Dbl2ULng, double val)
else {
// subtract 0x8000000000000000, do the convert then add it back again
ret = FastDbl2Lng(val - two63) + I64(0x8000000000000000);
-}
+ }
return ret;
}
HCIMPLEND
@@ -654,7 +654,7 @@ HCIMPL1_V(UINT64, JIT_Dbl2ULngOvf, double val)
HCIMPLEND
-#if !defined(_TARGET_X86_)
+#if !defined(_TARGET_X86_) || defined(FEATURE_PAL)
HCIMPL1_V(INT64, JIT_Dbl2Lng, double val)
{
@@ -755,7 +755,7 @@ HCIMPL2_VV(double, JIT_DblRem, double dividend, double divisor)
}
HCIMPLEND
-#endif // !defined(_TARGET_X86_)
+#endif // !_TARGET_X86_ || FEATURE_PAL
#include <optdefault.h>
@@ -2858,7 +2858,7 @@ HCIMPL1(Object*, JIT_NewS_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_)
do
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler
// to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates
@@ -2872,7 +2872,7 @@ HCIMPL1(Object*, JIT_NewS_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_)
SIZE_T size = methodTable->GetBaseSize();
_ASSERTE(size % DATA_ALIGNMENT == 0);
- alloc_context *allocContext = thread->GetAllocContext();
+ gc_alloc_context *allocContext = thread->GetAllocContext();
BYTE *allocPtr = allocContext->alloc_ptr;
_ASSERTE(allocPtr <= allocContext->alloc_limit);
if (size > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
@@ -2997,7 +2997,7 @@ HCIMPL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength)
do
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// Instead of doing elaborate overflow checks, we just limit the number of elements. This will avoid all overflow
// problems, as well as making sure big string objects are correctly allocated in the big object heap.
@@ -3021,7 +3021,7 @@ HCIMPL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength)
_ASSERTE(alignedTotalSize >= totalSize);
totalSize = alignedTotalSize;
- alloc_context *allocContext = thread->GetAllocContext();
+ gc_alloc_context *allocContext = thread->GetAllocContext();
BYTE *allocPtr = allocContext->alloc_ptr;
_ASSERTE(allocPtr <= allocContext->alloc_limit);
if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
@@ -3161,7 +3161,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn
do
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// Do a conservative check here. This is to avoid overflow while doing the calculations. We don't
// have to worry about "large" objects, since the allocation quantum is never big enough for
@@ -3198,7 +3198,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn
_ASSERTE(alignedTotalSize >= totalSize);
totalSize = alignedTotalSize;
- alloc_context *allocContext = thread->GetAllocContext();
+ gc_alloc_context *allocContext = thread->GetAllocContext();
BYTE *allocPtr = allocContext->alloc_ptr;
_ASSERTE(allocPtr <= allocContext->alloc_limit);
if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
@@ -3238,7 +3238,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH
do
{
- _ASSERTE(GCHeap::UseAllocationContexts());
+ _ASSERTE(GCHeapUtilities::UseAllocationContexts());
// Make sure that the total size cannot reach LARGE_OBJECT_SIZE, which also allows us to avoid overflow checks. The
// "256" slack is to cover the array header size and round-up, using a constant value here out of laziness.
@@ -3266,7 +3266,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH
_ASSERTE(ALIGN_UP(totalSize, DATA_ALIGNMENT) == totalSize);
- alloc_context *allocContext = thread->GetAllocContext();
+ gc_alloc_context *allocContext = thread->GetAllocContext();
BYTE *allocPtr = allocContext->alloc_ptr;
_ASSERTE(allocPtr <= allocContext->alloc_limit);
if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
@@ -6431,7 +6431,7 @@ HCIMPL0(VOID, JIT_StressGC)
bool fSkipGC = false;
if (!fSkipGC)
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
// <TODO>@TODO: the following ifdef is in error, but if corrected the
// compiler complains about the *__ms->pRetAddr() saying machine state
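The allocation fast paths touched above (JIT_NewS_MP_FastPortable, AllocateString_MP_FastPortable, and the two JIT_NewArr1*_MP_FastPortable helpers) all share one bump-pointer shape against the thread-local gc_alloc_context. A minimal sketch of that shape, with illustrative names rather than the actual helper signatures:

    // Hypothetical condensation of the fast path in the hunks above; returns
    // NULL when the context cannot satisfy the request, in which case the
    // caller tail-calls the framed (slow) allocation helper.
    static Object* TryBumpAlloc(gc_alloc_context* allocContext, SIZE_T size, MethodTable* methodTable)
    {
        BYTE* allocPtr = allocContext->alloc_ptr;
        if (size > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr))
            return NULL;
        allocContext->alloc_ptr = allocPtr + size;          // bump the pointer
        Object* obj = reinterpret_cast<Object*>(allocPtr);
        obj->SetMethodTable(methodTable);                   // publish the type last
        return obj;
    }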
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index 76d4568adb..2f9db3d596 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -27,7 +27,7 @@
#include "security.h"
#include "securitymeta.h"
#include "dllimport.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "comdelegate.h"
#include "jitperf.h" // to track jit perf
#include "corprof.h"
@@ -2303,11 +2303,21 @@ unsigned CEEInfo::getClassGClayout (CORINFO_CLASS_HANDLE clsHnd, BYTE* gcPtrs)
MethodTable* pMT = VMClsHnd.GetMethodTable();
- if (pMT == g_TypedReferenceMT)
+ if (pMT->IsByRefLike())
{
- gcPtrs[0] = TYPE_GC_BYREF;
- gcPtrs[1] = TYPE_GC_NONE;
- result = 1;
+ if (pMT == g_TypedReferenceMT)
+ {
+ gcPtrs[0] = TYPE_GC_BYREF;
+ gcPtrs[1] = TYPE_GC_NONE;
+ result = 1;
+ }
+ else
+ {
+ // TODO-SPAN: Proper GC reporting
+ memset(gcPtrs, TYPE_GC_NONE,
+ (VMClsHnd.GetSize() + sizeof(void*) - 1) / sizeof(void*));
+ result = 0;
+ }
}
else if (VMClsHnd.IsNativeValueType())
{
@@ -3988,7 +3998,7 @@ DWORD CEEInfo::getClassAttribsInternal (CORINFO_CLASS_HANDLE clsHnd)
if (pMT->IsMarshaledByRef())
ret |= CORINFO_FLG_MARSHAL_BYREF;
- if (pMT->ContainsPointers())
+ if (pMT->ContainsPointers() || pMT == g_TypedReferenceMT)
ret |= CORINFO_FLG_CONTAINS_GC_PTR;
if (pMT->IsDelegate())
@@ -5027,6 +5037,7 @@ void CEEInfo::getCallInfo(
}
+#ifdef FEATURE_CER
if (pMD == g_pPrepareConstrainedRegionsMethod && !isVerifyOnly())
{
MethodDesc * methodFromContext = GetMethodFromContext(pResolvedToken->tokenContext);
@@ -5048,6 +5059,7 @@ void CEEInfo::getCallInfo(
}
}
}
+#endif // FEATURE_CER
TypeHandle exactType = TypeHandle(pResolvedToken->hClass);
@@ -5093,6 +5105,19 @@ void CEEInfo::getCallInfo(
// shared generic code - it may just resolve it to a candidate suitable for
// JIT compilation, and require a runtime lookup for the actual code pointer
// to call.
+ if (constrainedType.IsEnum())
+ {
+ // Optimize constrained calls to enum's GetHashCode method. TryResolveConstraintMethodApprox would return
+ // null since the virtual method resolves to System.Enum's implementation and that's a reference type.
+ // We can't do this for any other method since ToString and Equals have different semantics for enums
+ // and their underlying type.
+ if (pMD->GetSlot() == MscorlibBinder::GetMethod(METHOD__OBJECT__GET_HASH_CODE)->GetSlot())
+ {
+ // Pretend this was a "constrained. UnderlyingType" instruction prefix
+ constrainedType = TypeHandle(MscorlibBinder::GetElementType(constrainedType.GetVerifierCorElementType()));
+ }
+ }
+
MethodDesc * directMethod = constrainedType.GetMethodTable()->TryResolveConstraintMethodApprox(
exactType,
pMD,
@@ -6767,6 +6792,28 @@ void getMethodInfoILMethodHeaderHelper(
(CorInfoOptions)((header->GetFlags() & CorILMethod_InitLocals) ? CORINFO_OPT_INIT_LOCALS : 0) ;
}
+mdToken FindGenericMethodArgTypeSpec(IMDInternalImport* pInternalImport)
+{
+ STANDARD_VM_CONTRACT;
+
+ HENUMInternalHolder hEnumTypeSpecs(pInternalImport);
+ mdToken token;
+
+ static const BYTE signature[] = { ELEMENT_TYPE_MVAR, 0 };
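+ // ELEMENT_TYPE_MVAR followed by compressed index 0 is the ECMA-335
+ // encoding of "!!0", the method's first generic argument; the loop
+ // below reuses an existing TypeSpec row rather than inventing a token.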
+
+ hEnumTypeSpecs.EnumAllInit(mdtTypeSpec);
+ while (hEnumTypeSpecs.EnumNext(&token))
+ {
+ PCCOR_SIGNATURE pSig;
+ ULONG cbSig;
+ IfFailThrow(pInternalImport->GetTypeSpecFromToken(token, &pSig, &cbSig));
+ if (cbSig == sizeof(signature) && memcmp(pSig, signature, cbSig) == 0)
+ return token;
+ }
+
+ COMPlusThrowHR(COR_E_BADIMAGEFORMAT);
+}
+
/*********************************************************************
IL is the most efficient and portable way to implement certain low level methods
@@ -6878,10 +6925,165 @@ bool getILIntrinsicImplementation(MethodDesc * ftn,
return true;
}
}
+#ifdef FEATURE_SPAN_OF_T
+ else if (tk == MscorlibBinder::GetMethod(METHOD__JIT_HELPERS__BYREF_LESSTHAN)->GetMemberDef())
+ {
+ // Compare the two arguments
+ static const BYTE ilcode[] = { CEE_LDARG_0, CEE_LDARG_1, CEE_PREFIX1, (CEE_CLT & 0xFF), CEE_RET };
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 2;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ else if (tk == MscorlibBinder::GetMethod(METHOD__JIT_HELPERS__GET_ARRAY_DATA)->GetMemberDef())
+ {
+ mdToken tokArrayPinningHelper = MscorlibBinder::GetField(FIELD__ARRAY_PINNING_HELPER__M_ARRAY_DATA)->GetMemberDef();
+
+ static BYTE ilcode[] = { CEE_LDARG_0,
+ CEE_LDFLDA,0,0,0,0,
+ CEE_RET };
+
+ ilcode[2] = (BYTE)(tokArrayPinningHelper);
+ ilcode[3] = (BYTE)(tokArrayPinningHelper >> 8);
+ ilcode[4] = (BYTE)(tokArrayPinningHelper >> 16);
+ ilcode[5] = (BYTE)(tokArrayPinningHelper >> 24);
+
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ else if (tk == MscorlibBinder::GetMethod(METHOD__JIT_HELPERS__CONTAINSREFERENCES)->GetMemberDef())
+ {
+ _ASSERTE(ftn->HasMethodInstantiation());
+ Instantiation inst = ftn->GetMethodInstantiation();
+
+ _ASSERTE(ftn->GetNumGenericMethodArgs() == 1);
+ TypeHandle typeHandle = inst[0];
+ MethodTable * methodTable = typeHandle.GetMethodTable();
+
+ static const BYTE returnTrue[] = { CEE_LDC_I4_1, CEE_RET };
+ static const BYTE returnFalse[] = { CEE_LDC_I4_0, CEE_RET };
+
+ if (!methodTable->IsValueType() || methodTable->ContainsPointers())
+ {
+ methInfo->ILCode = const_cast<BYTE*>(returnTrue);
+ }
+ else
+ {
+ methInfo->ILCode = const_cast<BYTE*>(returnFalse);
+ }
+
+ methInfo->ILCodeSize = sizeof(returnTrue);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+#endif // FEATURE_SPAN_OF_T
return false;
}
+#ifdef FEATURE_SPAN_OF_T
+bool getILIntrinsicImplementationForUnsafe(MethodDesc * ftn,
+ CORINFO_METHOD_INFO * methInfo)
+{
+ STANDARD_VM_CONTRACT;
+
+ // Precondition: ftn is a method in mscorlib
+ _ASSERTE(ftn->GetModule()->IsSystem());
+
+ mdMethodDef tk = ftn->GetMemberDef();
+
+ if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__AS_POINTER)->GetMemberDef())
+ {
+ // Return the argument that was passed in.
+ static const BYTE ilcode[] = { CEE_LDARG_0, CEE_CONV_U, CEE_RET };
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__SIZEOF)->GetMemberDef())
+ {
+ _ASSERTE(ftn->HasMethodInstantiation());
+ Instantiation inst = ftn->GetMethodInstantiation();
+
+ _ASSERTE(ftn->GetNumGenericMethodArgs() == 1);
+ mdToken tokGenericArg = FindGenericMethodArgTypeSpec(MscorlibBinder::GetModule()->GetMDImport());
+
+ static BYTE ilcode[] = { CEE_PREFIX1, (CEE_SIZEOF & 0xFF), 0,0,0,0, CEE_RET };
+
+ ilcode[2] = (BYTE)(tokGenericArg);
+ ilcode[3] = (BYTE)(tokGenericArg >> 8);
+ ilcode[4] = (BYTE)(tokGenericArg >> 16);
+ ilcode[5] = (BYTE)(tokGenericArg >> 24);
+
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ else if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__BYREF_AS)->GetMemberDef())
+ {
+ // Return the argument that was passed in.
+ static const BYTE ilcode[] = { CEE_LDARG_0, CEE_RET };
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 1;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ else if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__BYREF_ADD)->GetMemberDef())
+ {
+ mdToken tokGenericArg = FindGenericMethodArgTypeSpec(MscorlibBinder::GetModule()->GetMDImport());
+
+ static BYTE ilcode[] = { CEE_LDARG_1,
+ CEE_PREFIX1, (CEE_SIZEOF & 0xFF), 0,0,0,0,
+ CEE_CONV_I,
+ CEE_MUL,
+ CEE_LDARG_0,
+ CEE_ADD,
+ CEE_RET };
+
+ ilcode[3] = (BYTE)(tokGenericArg);
+ ilcode[4] = (BYTE)(tokGenericArg >> 8);
+ ilcode[5] = (BYTE)(tokGenericArg >> 16);
+ ilcode[6] = (BYTE)(tokGenericArg >> 24);
+
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 2;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+ else if (tk == MscorlibBinder::GetMethod(METHOD__UNSAFE__BYREF_ARE_SAME)->GetMemberDef())
+ {
+ // Compare the two arguments
+ static const BYTE ilcode[] = { CEE_LDARG_0, CEE_LDARG_1, CEE_PREFIX1, (CEE_CEQ & 0xFF), CEE_RET };
+ methInfo->ILCode = const_cast<BYTE*>(ilcode);
+ methInfo->ILCodeSize = sizeof(ilcode);
+ methInfo->maxStack = 2;
+ methInfo->EHcount = 0;
+ methInfo->options = (CorInfoOptions)0;
+ return true;
+ }
+
+ return false;
+}
+#endif // FEATURE_SPAN_OF_T
+
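Each Unsafe intrinsic above splices a metadata token into its static IL buffer byte by byte; a hypothetical helper capturing that recurring pattern (IL streams encode tokens little-endian regardless of host endianness):

    // Illustrative only; the intrinsics above inline this per call site.
    static void PatchILToken(BYTE* ilcode, size_t offset, mdToken token)
    {
        ilcode[offset + 0] = (BYTE)(token);
        ilcode[offset + 1] = (BYTE)(token >> 8);
        ilcode[offset + 2] = (BYTE)(token >> 16);
        ilcode[offset + 3] = (BYTE)(token >> 24);
    }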
bool getILIntrinsicImplementationForVolatile(MethodDesc * ftn,
CORINFO_METHOD_INFO * methInfo)
{
@@ -7062,6 +7264,12 @@ getMethodInfoHelper(
{
fILIntrinsic = getILIntrinsicImplementation(ftn, methInfo);
}
+#ifdef FEATURE_SPAN_OF_T
+ else if (MscorlibBinder::IsClass(pMT, CLASS__UNSAFE))
+ {
+ fILIntrinsic = getILIntrinsicImplementationForUnsafe(ftn, methInfo);
+ }
+#endif
else if (MscorlibBinder::IsClass(pMT, CLASS__INTERLOCKED))
{
fILIntrinsic = getILIntrinsicImplementationForInterlocked(ftn, methInfo);
@@ -7394,11 +7602,15 @@ CorInfoInline CEEInfo::canInline (CORINFO_METHOD_HANDLE hCaller,
// If the callee wants debuggable code, don't allow it to be inlined
- if (GetDebuggerCompileFlags(pCallee->GetModule(), 0) & CORJIT_FLG_DEBUG_CODE)
{
- result = INLINE_NEVER;
- szFailReason = "Inlinee is debuggable";
- goto exit;
+ // Combining the next two lines, and eliminating jitDebuggerFlags, leads to bad codegen in x86 Release builds using Visual C++ 19.00.24215.1.
+ CORJIT_FLAGS jitDebuggerFlags = GetDebuggerCompileFlags(pCallee->GetModule(), CORJIT_FLAGS());
+ if (jitDebuggerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE))
+ {
+ result = INLINE_NEVER;
+ szFailReason = "Inlinee is debuggable";
+ goto exit;
+ }
}
#endif
@@ -8167,6 +8379,7 @@ bool CEEInfo::canTailCall (CORINFO_METHOD_HANDLE hCaller,
}
}
+#ifdef FEATURE_CER
// We cannot tail call from a root CER method, the thread abort algorithm to
// detect CERs depends on seeing such methods on the stack.
if (IsCerRootMethod(pCaller))
@@ -8175,6 +8388,7 @@ bool CEEInfo::canTailCall (CORINFO_METHOD_HANDLE hCaller,
szFailReason = "Caller is a CER root";
goto exit;
}
+#endif // FEATURE_CER
result = true;
@@ -8627,6 +8841,9 @@ void CEEInfo::getFunctionEntryPoint(CORINFO_METHOD_HANDLE ftnHnd,
JIT_TO_EE_TRANSITION();
MethodDesc * ftn = GetMethod(ftnHnd);
+#if defined(FEATURE_GDBJIT)
+ MethodDesc * orig_ftn = ftn;
+#endif
// Resolve methodImpl.
ftn = ftn->GetMethodTable()->MapMethodDeclToMethodImpl(ftn);
@@ -8645,6 +8862,12 @@ void CEEInfo::getFunctionEntryPoint(CORINFO_METHOD_HANDLE ftnHnd,
accessType = IAT_PVALUE;
}
+
+#if defined(FEATURE_GDBJIT)
+ CalledMethod * pCM = new CalledMethod(orig_ftn, ret, m_pCalledMethods);
+ m_pCalledMethods = pCM;
+#endif
+
EE_TO_JIT_TRANSITION();
_ASSERTE(ret != NULL);
@@ -9103,7 +9326,7 @@ CorInfoTypeWithMod CEEInfo::getArgType (
CorElementType normType = typeHnd.GetInternalCorElementType();
// if we are looking up a value class, don't morph it to a reference type
- // (This can only happen in illegal IL
+ // (This can only happen in illegal IL)
if (!CorTypeInfo::IsObjRef(normType) || type != ELEMENT_TYPE_VALUETYPE)
{
type = normType;
@@ -11677,8 +11900,7 @@ static CorJitResult CompileMethodWithEtwWrapper(EEJitManager *jitMgr,
CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
CEEInfo *comp,
struct CORINFO_METHOD_INFO *info,
- unsigned flags,
- unsigned flags2,
+ CORJIT_FLAGS jitFlags,
BYTE **nativeEntry,
ULONG *nativeSizeOfCode)
{
@@ -11689,13 +11911,9 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
CorJitResult ret = CORJIT_SKIPPED; // Note that CORJIT_SKIPPED is an error exit status code
- CORJIT_FLAGS jitFlags = { 0 };
- jitFlags.corJitFlags = flags;
- jitFlags.corJitFlags2 = flags2;
-
#if !defined(FEATURE_CORECLR)
// Ask the JIT to generate desktop-quirk-compatible code.
- jitFlags.corJitFlags2 |= CORJIT_FLG2_DESKTOP_QUIRKS;
+ jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_DESKTOP_QUIRKS);
#endif
comp->setJitFlags(jitFlags);
@@ -11711,7 +11929,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
#if defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
ret = getJit()->compileMethod( comp,
info,
- CORJIT_FLG_CALL_GETJITFLAGS,
+ CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS,
nativeEntry,
nativeSizeOfCode);
@@ -11720,18 +11938,18 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
#if defined(ALLOW_SXS_JIT) && !defined(CROSSGEN_COMPILE)
if (FAILED(ret) && jitMgr->m_alternateJit
#ifdef FEATURE_STACK_SAMPLING
- && (!samplingEnabled || (jitFlags.corJitFlags2 & CORJIT_FLG2_SAMPLING_JIT_BACKGROUND))
+ && (!samplingEnabled || (jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SAMPLING_JIT_BACKGROUND)))
#endif
)
{
ret = jitMgr->m_alternateJit->compileMethod( comp,
info,
- CORJIT_FLG_CALL_GETJITFLAGS,
+ CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS,
nativeEntry,
nativeSizeOfCode );
#ifdef FEATURE_STACK_SAMPLING
- if (jitFlags.corJitFlags2 & CORJIT_FLG2_SAMPLING_JIT_BACKGROUND)
+ if (jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SAMPLING_JIT_BACKGROUND))
{
// Don't bother with failures if we couldn't collect a trace.
ret = CORJIT_OK;
@@ -11758,7 +11976,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
{
// If we're doing an "import_only" compilation, it's for verification, so don't interpret.
// (We assume that importation is completely architecture-independent, or at least nearly so.)
- if (FAILED(ret) && (jitFlags.corJitFlags & (CORJIT_FLG_IMPORT_ONLY | CORJIT_FLG_MAKEFINALCODE)) == 0)
+ if (FAILED(ret) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE))
{
ret = Interpreter::GenerateInterpreterStub(comp, info, nativeEntry, nativeSizeOfCode);
}
@@ -11769,7 +11987,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
ret = CompileMethodWithEtwWrapper(jitMgr,
comp,
info,
- CORJIT_FLG_CALL_GETJITFLAGS,
+ CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS,
nativeEntry,
nativeSizeOfCode);
}
@@ -11778,7 +11996,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
{
// If we're doing an "import_only" compilation, it's for verification, so don't interpret.
// (We assume that importation is completely architecture-independent, or at least nearly so.)
- if (FAILED(ret) && (jitFlags.corJitFlags & (CORJIT_FLG_IMPORT_ONLY | CORJIT_FLG_MAKEFINALCODE)) == 0)
+ if (FAILED(ret) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE))
{
ret = Interpreter::GenerateInterpreterStub(comp, info, nativeEntry, nativeSizeOfCode);
}
@@ -11788,7 +12006,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
{
ret = jitMgr->m_jit->compileMethod( comp,
info,
- CORJIT_FLG_CALL_GETJITFLAGS,
+ CORJIT_FLAGS::CORJIT_FLAG_CALL_GETJITFLAGS,
nativeEntry,
nativeSizeOfCode);
}
@@ -11800,7 +12018,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
// If the JIT fails we keep the IL around and will
// try reJIT the same IL. VSW 525059
//
- if (SUCCEEDED(ret) && !(jitFlags.corJitFlags & CORJIT_FLG_IMPORT_ONLY) && !((CEEJitInfo*)comp)->JitAgain())
+ if (SUCCEEDED(ret) && !jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && !((CEEJitInfo*)comp)->JitAgain())
{
((CEEJitInfo*)comp)->CompressDebugInfo();
@@ -11815,6 +12033,14 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
#endif // defined(CROSSGEN_COMPILE) && !defined(FEATURE_CORECLR)
+#if defined(FEATURE_GDBJIT)
+ if (SUCCEEDED(ret) && *nativeEntry != NULL)
+ {
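+ // The low bit of the entry point may be a Thumb-mode marker on ARM;
+ // mask it off before stepping back to the preceding CodeHeader.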
+ CodeHeader* pCH = ((CodeHeader*)((PCODE)*nativeEntry & ~1)) - 1;
+ pCH->SetCalledMethods((PTR_VOID)comp->GetCalledMethods());
+ }
+#endif
+
END_SO_TOLERANT_CODE;
return ret;
@@ -11825,8 +12051,7 @@ CorJitResult invokeCompileMethodHelper(EEJitManager *jitMgr,
CorJitResult invokeCompileMethod(EEJitManager *jitMgr,
CEEInfo *comp,
struct CORINFO_METHOD_INFO *info,
- unsigned flags,
- unsigned flags2,
+ CORJIT_FLAGS jitFlags,
BYTE **nativeEntry,
ULONG *nativeSizeOfCode)
{
@@ -11841,7 +12066,7 @@ CorJitResult invokeCompileMethod(EEJitManager *jitMgr,
GCX_PREEMP();
- CorJitResult ret = invokeCompileMethodHelper(jitMgr, comp, info, flags, flags2, nativeEntry, nativeSizeOfCode);
+ CorJitResult ret = invokeCompileMethodHelper(jitMgr, comp, info, jitFlags, nativeEntry, nativeSizeOfCode);
//
// Verify that we are still in preemptive mode when we return
@@ -11853,9 +12078,9 @@ CorJitResult invokeCompileMethod(EEJitManager *jitMgr,
return ret;
}
-CorJitFlag GetCompileFlagsIfGenericInstantiation(
+CORJIT_FLAGS GetCompileFlagsIfGenericInstantiation(
CORINFO_METHOD_HANDLE method,
- CorJitFlag compileFlags,
+ CORJIT_FLAGS compileFlags,
ICorJitInfo * pCorJitInfo,
BOOL * raiseVerificationException,
BOOL * unverifiableGenericCode);
@@ -11863,8 +12088,7 @@ CorJitFlag GetCompileFlagsIfGenericInstantiation(
CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
CEEInfo *comp,
struct CORINFO_METHOD_INFO *info,
- unsigned flags,
- unsigned flags2,
+ CORJIT_FLAGS flags,
BYTE **nativeEntry,
ULONG *nativeSizeOfCode,
MethodDesc *ftn)
@@ -11880,8 +12104,7 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
EEJitManager *jitMgr;
CEEInfo *comp;
struct CORINFO_METHOD_INFO *info;
- unsigned flags;
- unsigned flags2;
+ CORJIT_FLAGS flags;
BYTE **nativeEntry;
ULONG *nativeSizeOfCode;
MethodDesc *ftn;
@@ -11891,7 +12114,6 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
param.comp = comp;
param.info = info;
param.flags = flags;
- param.flags2 = flags2;
param.nativeEntry = nativeEntry;
param.nativeSizeOfCode = nativeSizeOfCode;
param.ftn = ftn;
@@ -11907,16 +12129,16 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
pParam->comp,
pParam->info,
pParam->flags,
- pParam->flags2,
pParam->nativeEntry,
pParam->nativeSizeOfCode);
}
PAL_FINALLY
{
#if defined(DEBUGGING_SUPPORTED) && !defined(CROSSGEN_COMPILE)
- if (!(flags & (CORJIT_FLG_IMPORT_ONLY | CORJIT_FLG_MCJIT_BACKGROUND))
+ if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) &&
+ !flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND)
#ifdef FEATURE_STACK_SAMPLING
- && !(flags2 & CORJIT_FLG2_SAMPLING_JIT_BACKGROUND)
+ && !flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SAMPLING_JIT_BACKGROUND)
#endif // FEATURE_STACK_SAMPLING
)
{
@@ -11954,7 +12176,7 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
/*********************************************************************/
// Figures out the compile flags that are used by both JIT and NGen
-/* static */ DWORD CEEInfo::GetBaseCompileFlags(MethodDesc * ftn)
+/* static */ CORJIT_FLAGS CEEInfo::GetBaseCompileFlags(MethodDesc * ftn)
{
CONTRACTL {
THROWS;
@@ -11965,16 +12187,16 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
// Figure out the code quality flags
//
- DWORD flags = 0;
+ CORJIT_FLAGS flags;
if (g_pConfig->JitFramed())
- flags |= CORJIT_FLG_FRAMED;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_FRAMED);
if (g_pConfig->JitAlignLoops())
- flags |= CORJIT_FLG_ALIGN_LOOPS;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_ALIGN_LOOPS);
if (ReJitManager::IsReJITEnabled() || g_pConfig->AddRejitNops())
- flags |= CORJIT_FLG_PROF_REJIT_NOPS;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_PROF_REJIT_NOPS);
#ifdef _TARGET_X86_
if (g_pConfig->PInvokeRestoreEsp(ftn->GetModule()->IsPreV4Assembly()))
- flags |= CORJIT_FLG_PINVOKE_RESTORE_ESP;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_PINVOKE_RESTORE_ESP);
#endif // _TARGET_X86_
//See if we should instruct the JIT to emit calls to JIT_PollGC for thread suspension. If we have a
@@ -11982,9 +12204,9 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
#ifdef FEATURE_ENABLE_GCPOLL
EEConfig::GCPollType pollType = g_pConfig->GetGCPollType();
if (EEConfig::GCPOLL_TYPE_POLL == pollType)
- flags |= CORJIT_FLG_GCPOLL_CALLS;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_GCPOLL_CALLS);
else if (EEConfig::GCPOLL_TYPE_INLINE == pollType)
- flags |= CORJIT_FLG_GCPOLL_INLINE;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_GCPOLL_INLINE);
#endif //FEATURE_ENABLE_GCPOLL
// Set flags based on method's ImplFlags.
@@ -11995,13 +12217,13 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
if (IsMiNoOptimization(dwImplFlags))
{
- flags |= CORJIT_FLG_MIN_OPT;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MIN_OPT);
}
// Always emit frames for methods marked no-inline (see #define ETW_EBP_FRAMED in the JIT)
if (IsMiNoInlining(dwImplFlags))
{
- flags |= CORJIT_FLG_FRAMED;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_FRAMED);
}
}
@@ -12012,7 +12234,7 @@ CorJitResult CallCompileMethodWithSEHWrapper(EEJitManager *jitMgr,
// Figures out (some of) the flags to use to compile the method
// Returns the new set to use
-DWORD GetDebuggerCompileFlags(Module* pModule, DWORD flags)
+CORJIT_FLAGS GetDebuggerCompileFlags(Module* pModule, CORJIT_FLAGS flags)
{
STANDARD_VM_CONTRACT;
@@ -12027,36 +12249,37 @@ DWORD GetDebuggerCompileFlags(Module* pModule, DWORD flags)
#ifdef _DEBUG
if (g_pConfig->GenDebuggableCode())
- flags |= CORJIT_FLG_DEBUG_CODE;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE);
#endif // _DEBUG
#ifdef EnC_SUPPORTED
if (pModule->IsEditAndContinueEnabled())
{
- flags |= CORJIT_FLG_DEBUG_EnC;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_EnC);
}
#endif // EnC_SUPPORTED
// Debug info is always tracked
- flags |= CORJIT_FLG_DEBUG_INFO;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_INFO);
#endif // DEBUGGING_SUPPORTED
if (CORDisableJITOptimizations(pModule->GetDebuggerInfoBits()))
{
- flags |= CORJIT_FLG_DEBUG_CODE;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE);
}
- if (flags & CORJIT_FLG_IMPORT_ONLY)
+ if (flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY))
{
// If we are only verifying the method, we don't need any debug info and this
// prevents getVars()/getBoundaries() from being called unnecessarily.
- flags &= ~(CORJIT_FLG_DEBUG_INFO|CORJIT_FLG_DEBUG_CODE);
+ flags.Clear(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_INFO);
+ flags.Clear(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE);
}
return flags;
}
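The DWORD-to-CORJIT_FLAGS migration throughout this change presupposes a small value-type wrapper over the flag bits. A minimal sketch consistent with the Set/Clear/IsSet/Add call sites (an assumed shape, not the real definition in the JIT interface headers):

    class CORJIT_FLAGS_Sketch
    {
        unsigned __int64 m_flags;
    public:
        CORJIT_FLAGS_Sketch() : m_flags(0) {}
        CORJIT_FLAGS_Sketch(int flag) : m_flags(0) { Set(flag); } // enables CORJIT_FLAGS(CORJIT_FLAG_...)
        void Set(int flag)          { m_flags |= 1ULL << flag; }
        void Clear(int flag)        { m_flags &= ~(1ULL << flag); }
        bool IsSet(int flag) const  { return (m_flags & (1ULL << flag)) != 0; }
        void Add(const CORJIT_FLAGS_Sketch& other) { m_flags |= other.m_flags; }
    };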
-CorJitFlag GetCompileFlags(MethodDesc * ftn, DWORD flags, CORINFO_METHOD_INFO * methodInfo)
+CORJIT_FLAGS GetCompileFlags(MethodDesc * ftn, CORJIT_FLAGS flags, CORINFO_METHOD_INFO * methodInfo)
{
STANDARD_VM_CONTRACT;
@@ -12065,14 +12288,14 @@ CorJitFlag GetCompileFlags(MethodDesc * ftn, DWORD flags, CORINFO_METHOD_INFO *
//
// Get the compile flags that are shared between JIT and NGen
//
- flags |= CEEInfo::GetBaseCompileFlags(ftn);
+ flags.Add(CEEInfo::GetBaseCompileFlags(ftn));
//
// Get CPU specific flags
//
- if ((flags & CORJIT_FLG_IMPORT_ONLY) == 0)
+ if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY))
{
- flags |= ExecutionManager::GetEEJitManager()->GetCPUCompileFlags();
+ flags.Add(ExecutionManager::GetEEJitManager()->GetCPUCompileFlags());
}
//
@@ -12080,21 +12303,19 @@ CorJitFlag GetCompileFlags(MethodDesc * ftn, DWORD flags, CORINFO_METHOD_INFO *
//
#ifdef DEBUGGING_SUPPORTED
- flags |= GetDebuggerCompileFlags(ftn->GetModule(), flags);
+ flags.Add(GetDebuggerCompileFlags(ftn->GetModule(), flags));
#endif
#ifdef PROFILING_SUPPORTED
- if (CORProfilerTrackEnterLeave()
- && !ftn->IsNoMetadata()
- )
- flags |= CORJIT_FLG_PROF_ENTERLEAVE;
+ if (CORProfilerTrackEnterLeave() && !ftn->IsNoMetadata())
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_PROF_ENTERLEAVE);
if (CORProfilerTrackTransitions())
- flags |= CORJIT_FLG_PROF_NO_PINVOKE_INLINE;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_PROF_NO_PINVOKE_INLINE);
#endif // PROFILING_SUPPORTED
// Set optimization flags
- if (0 == (flags & CORJIT_FLG_MIN_OPT))
+ if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MIN_OPT))
{
unsigned optType = g_pConfig->GenOptimizeType();
_ASSERTE(optType <= OPT_RANDOM);
@@ -12103,18 +12324,16 @@ CorJitFlag GetCompileFlags(MethodDesc * ftn, DWORD flags, CORINFO_METHOD_INFO *
optType = methodInfo->ILCodeSize % OPT_RANDOM;
if (g_pConfig->JitMinOpts())
- flags |= CORJIT_FLG_MIN_OPT;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MIN_OPT);
- const static unsigned optTypeFlags[] =
+ if (optType == OPT_SIZE)
{
- 0, // OPT_BLENDED
- CORJIT_FLG_SIZE_OPT, // OPT_CODE_SIZE
- CORJIT_FLG_SPEED_OPT // OPT_CODE_SPEED
- };
-
- _ASSERTE(optType < OPT_RANDOM);
- _ASSERTE((sizeof(optTypeFlags)/sizeof(optTypeFlags[0])) == OPT_RANDOM);
- flags |= optTypeFlags[optType];
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SIZE_OPT);
+ }
+ else if (optType == OPT_SPEED)
+ {
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SPEED_OPT);
+ }
}
//
@@ -12123,22 +12342,21 @@ CorJitFlag GetCompileFlags(MethodDesc * ftn, DWORD flags, CORINFO_METHOD_INFO *
#ifdef _DEBUG
if (g_pConfig->IsJitVerificationDisabled())
- flags |= CORJIT_FLG_SKIP_VERIFICATION;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
#endif // _DEBUG
- if ((flags & CORJIT_FLG_IMPORT_ONLY) == 0 &&
- Security::CanSkipVerification(ftn))
- flags |= CORJIT_FLG_SKIP_VERIFICATION;
+ if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && Security::CanSkipVerification(ftn))
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
if (ftn->IsILStub())
{
- flags |= CORJIT_FLG_SKIP_VERIFICATION;
+ flags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
// no debug info available for IL stubs
- flags &= ~CORJIT_FLG_DEBUG_INFO;
+ flags.Clear(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_INFO);
}
- return (CorJitFlag)flags;
+ return flags;
}
#if defined(_WIN64)
@@ -12148,12 +12366,12 @@ CorJitFlag GetCompileFlags(MethodDesc * ftn, DWORD flags, CORINFO_METHOD_INFO *
//
//This only works for real methods. If the method isn't IsIL, then IsVerifiable will AV. That would be a
//bad thing (TM).
-BOOL IsTransparentMethodSafeToSkipVerification(CorJitFlag flags, MethodDesc * ftn)
+BOOL IsTransparentMethodSafeToSkipVerification(CORJIT_FLAGS flags, MethodDesc * ftn)
{
STANDARD_VM_CONTRACT;
BOOL ret = FALSE;
- if (!(flags & CORJIT_FLG_IMPORT_ONLY) && !(flags & CORJIT_FLG_SKIP_VERIFICATION)
+ if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) && !flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION)
&& Security::IsMethodTransparent(ftn) &&
((ftn->IsIL() && !ftn->IsUnboxingStub()) ||
(ftn->IsDynamicMethod() && !ftn->IsILStub())))
@@ -12183,9 +12401,9 @@ BOOL IsTransparentMethodSafeToSkipVerification(CorJitFlag flags, MethodDesc * ft
// failed, then we need to throw an exception whenever we try
// to compile a real instantiation
-CorJitFlag GetCompileFlagsIfGenericInstantiation(
+CORJIT_FLAGS GetCompileFlagsIfGenericInstantiation(
CORINFO_METHOD_HANDLE method,
- CorJitFlag compileFlags,
+ CORJIT_FLAGS compileFlags,
ICorJitInfo * pCorJitInfo,
BOOL * raiseVerificationException,
BOOL * unverifiableGenericCode)
@@ -12196,7 +12414,7 @@ CorJitFlag GetCompileFlagsIfGenericInstantiation(
*unverifiableGenericCode = FALSE;
// If we have already decided to skip verification, keep on going.
- if (compileFlags & CORJIT_FLG_SKIP_VERIFICATION)
+ if (compileFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION))
return compileFlags;
CorInfoInstantiationVerification ver = pCorJitInfo->isInstantiationOfVerifiedGeneric(method);
@@ -12206,13 +12424,14 @@ CorJitFlag GetCompileFlagsIfGenericInstantiation(
case INSTVER_NOT_INSTANTIATION:
// Non-generic, or open instantiation of a generic type/method
if (IsTransparentMethodSafeToSkipVerification(compileFlags, (MethodDesc*)method))
- compileFlags = (CorJitFlag)(compileFlags | CORJIT_FLG_SKIP_VERIFICATION);
+ compileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
return compileFlags;
case INSTVER_GENERIC_PASSED_VERIFICATION:
// If the typical instantiation is verifiable, there is no need
// to verify the concrete instantiations
- return (CorJitFlag)(compileFlags | CORJIT_FLG_SKIP_VERIFICATION);
+ compileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
+ return compileFlags;
case INSTVER_GENERIC_FAILED_VERIFICATION:
@@ -12238,9 +12457,9 @@ CorJitFlag GetCompileFlagsIfGenericInstantiation(
// hits unverifiable code. Since we've already hit unverifiable code,
// there's no point in starting the JIT, just to have it give up, so we
// give up here.
- _ASSERTE(compileFlags & CORJIT_FLG_PREJIT);
+ _ASSERTE(compileFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_PREJIT));
*raiseVerificationException = TRUE;
- return (CorJitFlag)-1; // This value will not be used
+ return CORJIT_FLAGS(); // This value will not be used
}
#else // FEATURE_PREJIT
// Need to have this case here to keep the MAC build happy
@@ -12259,17 +12478,18 @@ CorJitFlag GetCompileFlagsIfGenericInstantiation(
// branches while compiling the concrete instantiation. Instead,
// just throw a VerificationException right away.
*raiseVerificationException = TRUE;
- return (CorJitFlag)-1; // This value will not be used
+ return CORJIT_FLAGS(); // This value will not be used
}
case CORINFO_VERIFICATION_CAN_SKIP:
{
- return (CorJitFlag)(compileFlags | CORJIT_FLG_SKIP_VERIFICATION);
+ compileFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION);
+ return compileFlags;
}
case CORINFO_VERIFICATION_RUNTIME_CHECK:
{
- // Compile the method without CORJIT_FLG_SKIP_VERIFICATION.
+ // Compile the method without CORJIT_FLAG_SKIP_VERIFICATION.
// The compiler will know to add a call to
// CORINFO_HELP_VERIFICATION_RUNTIME_CHECK, and then to skip verification.
return compileFlags;
@@ -12344,8 +12564,8 @@ BOOL g_fAllowRel32 = TRUE;
// Calls to this method that occur to check if inlining can occur on x86,
// are OK since they discard the return value of this method.
-PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
- DWORD flags, DWORD flags2, ULONG * pSizeOfCode)
+PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags,
+ ULONG * pSizeOfCode)
{
STANDARD_VM_CONTRACT;
@@ -12359,9 +12579,9 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
ftn->GetModule()->GetDomainFile()->IsZapRequired() &&
PartialNGenStressPercentage() == 0 &&
#ifdef FEATURE_STACK_SAMPLING
- !(flags2 & CORJIT_FLG2_SAMPLING_JIT_BACKGROUND) &&
+ !flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SAMPLING_JIT_BACKGROUND) &&
#endif
- !(flags & CORJIT_FLG_IMPORT_ONLY))
+ !flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY))
{
StackSString ss(SString::Ascii, "ZapRequire: JIT compiler invoked for ");
TypeString::AppendMethodInternal(ss, ftn);
@@ -12398,6 +12618,13 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to load JIT compiler"));
#endif // ALLOW_SXS_JIT
}
+
+ // If the legacy JIT (compatjit) was not used, but the user (normally a test case) requires that it be used, then fail.
+ // This is analogous to ZapRequire.
+ if (!jitMgr->m_fLegacyJitUsed && (CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_RequireLegacyJit) == 1))
+ {
+ EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_EXECUTIONENGINE, W("Failed to use legacy JIT compiler with RequireLegacyJit set"));
+ }
#endif // CROSSGEN_COMPILE
#ifdef _DEBUG
@@ -12444,10 +12671,10 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
getMethodInfoHelper(ftn, ftnHnd, ILHeader, &methodInfo);
// If it's generic then we can only enter through an instantiated md (unless we're just verifying it)
- _ASSERTE((flags & CORJIT_FLG_IMPORT_ONLY) != 0 || !ftn->IsGenericMethodDefinition());
+ _ASSERTE(flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) || !ftn->IsGenericMethodDefinition());
// If it's an instance method then it must not be entered from a generic class
- _ASSERTE((flags & CORJIT_FLG_IMPORT_ONLY) != 0 || ftn->IsStatic() ||
+ _ASSERTE(flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY) || ftn->IsStatic() ||
ftn->GetNumGenericClassArgs() == 0 || ftn->HasClassInstantiation());
// method attributes and signature are consistent
@@ -12456,7 +12683,7 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
flags = GetCompileFlags(ftn, flags, &methodInfo);
#ifdef _DEBUG
- if (!(flags & CORJIT_FLG_SKIP_VERIFICATION))
+ if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_SKIP_VERIFICATION))
{
SString methodString;
if (LoggingOn(LF_VERIFIER, LL_INFO100))
@@ -12488,10 +12715,10 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
for (;;)
{
#ifndef CROSSGEN_COMPILE
- CEEJitInfo jitInfo(ftn, ILHeader, jitMgr, (flags & CORJIT_FLG_IMPORT_ONLY) != 0);
+ CEEJitInfo jitInfo(ftn, ILHeader, jitMgr, flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY));
#else
// This path should be only ever used for verification in crossgen and so we should not need EEJitManager
- _ASSERTE((flags & CORJIT_FLG_IMPORT_ONLY) != 0);
+ _ASSERTE(flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY));
CEEInfo jitInfo(ftn, true);
EEJitManager *jitMgr = NULL;
#endif
@@ -12550,7 +12777,7 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
flags = GetCompileFlagsIfGenericInstantiation(
ftnHnd,
- (CorJitFlag)flags,
+ flags,
&jitInfo,
&raiseVerificationException,
&unverifiableGenericCode);
@@ -12571,7 +12798,7 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
#ifdef PERF_TRACK_METHOD_JITTIMES
//Because we're not calling QPC enough. I'm not going to track times if we're just importing.
LARGE_INTEGER methodJitTimeStart = {0};
- if (!(flags & CORJIT_FLG_IMPORT_ONLY))
+ if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY))
QueryPerformanceCounter (&methodJitTimeStart);
#endif
@@ -12591,7 +12818,6 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
&jitInfo,
&methodInfo,
flags,
- flags2,
&nativeEntry,
&sizeOfCode,
(MethodDesc*)ftn);
@@ -12622,7 +12848,7 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
#ifdef PERF_TRACK_METHOD_JITTIMES
//store the time in the string buffer. Module name and token are unique enough. Also, do not
//capture importing time, just actual compilation time.
- if (!(flags & CORJIT_FLG_IMPORT_ONLY))
+ if (!flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY))
{
LARGE_INTEGER methodJitTimeStop;
QueryPerformanceCounter(&methodJitTimeStop);
@@ -12653,7 +12879,7 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader,
ThrowExceptionForJit(res);
}
- if (flags & CORJIT_FLG_IMPORT_ONLY)
+ if (flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY))
{
// The method must have been processed by the verifier. Note that it may
// either have been marked as verifiable or unverifiable.
@@ -13372,19 +13598,19 @@ BOOL LoadDynamicInfoEntry(Module *currentModule,
break;
case READYTORUN_HELPER_DelayLoad_MethodCall:
- result = (size_t)DelayLoad_MethodCall;
+ result = (size_t)GetEEFuncEntryPoint(DelayLoad_MethodCall);
break;
case READYTORUN_HELPER_DelayLoad_Helper:
- result = (size_t)DelayLoad_Helper;
+ result = (size_t)GetEEFuncEntryPoint(DelayLoad_Helper);
break;
case READYTORUN_HELPER_DelayLoad_Helper_Obj:
- result = (size_t)DelayLoad_Helper_Obj;
+ result = (size_t)GetEEFuncEntryPoint(DelayLoad_Helper_Obj);
break;
case READYTORUN_HELPER_DelayLoad_Helper_ObjObj:
- result = (size_t)DelayLoad_Helper_ObjObj;
+ result = (size_t)GetEEFuncEntryPoint(DelayLoad_Helper_ObjObj);
break;
default:
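The four READYTORUN_HELPER_DelayLoad_* cases above route the raw function symbols through GetEEFuncEntryPoint instead of a plain cast; the wrapper normalizes a symbol into a callable PCODE. A hedged sketch of why that matters on ARM, where call targets must carry the Thumb bit (illustrative macro, not the real definition):

    #if defined(_TARGET_ARM_)
    #define GetEEFuncEntryPoint_Sketch(pfn) ((PCODE)(pfn) | 0x1)  // set the Thumb bit
    #else
    #define GetEEFuncEntryPoint_Sketch(pfn) ((PCODE)(pfn))
    #endif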
diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
index 03983f2d3e..ee13b9cec6 100644
--- a/src/vm/jitinterface.h
+++ b/src/vm/jitinterface.h
@@ -28,6 +28,9 @@ class FieldDesc;
enum RuntimeExceptionKind;
class AwareLock;
class PtrArray;
+#if defined(FEATURE_GDBJIT)
+class CalledMethod;
+#endif
#include "genericdict.h"
@@ -51,7 +54,7 @@ void InitJITHelpers1();
void InitJITHelpers2();
PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* header,
- DWORD flags, DWORD flags2, ULONG* sizeOfCode = NULL);
+ CORJIT_FLAGS flags, ULONG* sizeOfCode = NULL);
void getMethodInfoHelper(MethodDesc * ftn,
CORINFO_METHOD_HANDLE ftnHnd,
@@ -644,7 +647,7 @@ public:
);
// Returns that compilation flags that are shared between JIT and NGen
- static DWORD GetBaseCompileFlags(MethodDesc * ftn);
+ static CORJIT_FLAGS GetBaseCompileFlags(MethodDesc * ftn);
// Resolve metadata token into runtime method handles.
void resolveToken(/* IN, OUT */ CORINFO_RESOLVED_TOKEN * pResolvedToken);
@@ -1090,6 +1093,9 @@ public:
m_pThread(GetThread()),
m_hMethodForSecurity_Key(NULL),
m_pMethodForSecurity_Value(NULL)
+#if defined(FEATURE_GDBJIT)
+ , m_pCalledMethods(NULL)
+#endif
{
LIMITED_METHOD_CONTRACT;
}
@@ -1151,6 +1157,10 @@ public:
MethodDesc * pTemplateMD /* for method-based slots */,
CORINFO_LOOKUP *pResultLookup);
+#if defined(FEATURE_GDBJIT)
+ CalledMethod * GetCalledMethods() { return m_pCalledMethods; }
+#endif
+
protected:
// NGen provides its own modifications to the EE-JIT interface. For technical reasons it cannot simply inherit
// from code:CEEInfo class (because it has dependencies on VM that NGen does not want).
@@ -1174,6 +1184,10 @@ protected:
CORINFO_METHOD_HANDLE m_hMethodForSecurity_Key;
MethodDesc * m_pMethodForSecurity_Value;
+#if defined(FEATURE_GDBJIT)
+ CalledMethod * m_pCalledMethods;
+#endif
+
// Tracking of module activation dependencies. We have two flavors:
// - Fast one that gathers generic arguments from EE handles, but does not work inside generic context.
// - Slow one that operates on typespec and methodspecs from metadata.
@@ -1641,7 +1655,7 @@ struct VirtualFunctionPointerArgs
FCDECL2(CORINFO_MethodPtr, JIT_VirtualFunctionPointer_Dynamic, Object * objectUNSAFE, VirtualFunctionPointerArgs * pArgs);
-typedef TADDR (F_CALL_CONV * FnStaticBaseHelper)(TADDR arg0, TADDR arg1);
+typedef HCCALL2_PTR(TADDR, FnStaticBaseHelper, TADDR arg0, TADDR arg1);
struct StaticFieldAddressArgs
{
@@ -1677,8 +1691,8 @@ public:
static FCDECL3(void, UnsafeSetArrayElement, PtrArray* pPtrArray, INT32 index, Object* object);
};
-DWORD GetDebuggerCompileFlags(Module* pModule, DWORD flags);
+CORJIT_FLAGS GetDebuggerCompileFlags(Module* pModule, CORJIT_FLAGS flags);
-bool TrackAllocationsEnabled();
+bool __stdcall TrackAllocationsEnabled();
#endif // JITINTERFACE_H
diff --git a/src/vm/jitinterfacegen.cpp b/src/vm/jitinterfacegen.cpp
index 0a90dc347d..f8a95bb759 100644
--- a/src/vm/jitinterfacegen.cpp
+++ b/src/vm/jitinterfacegen.cpp
@@ -221,7 +221,7 @@ void InitJITHelpers1()
))
{
// if (multi-proc || server GC)
- if (GCHeap::UseAllocationContexts())
+ if (GCHeapUtilities::UseAllocationContexts())
{
#ifdef FEATURE_IMPLICIT_TLS
SetJitHelperFunction(CORINFO_HELP_NEWSFAST, JIT_NewS_MP_FastPortable);
diff --git a/src/vm/marshalnative.cpp b/src/vm/marshalnative.cpp
index 48911b7190..5f05fa2daf 100644
--- a/src/vm/marshalnative.cpp
+++ b/src/vm/marshalnative.cpp
@@ -27,7 +27,7 @@
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "security.h"
#include "dbginterface.h"
#include "objecthandle.h"
diff --git a/src/vm/marshalnative.h b/src/vm/marshalnative.h
index 4f6ac854ff..cff3f7eb63 100644
--- a/src/vm/marshalnative.h
+++ b/src/vm/marshalnative.h
@@ -229,7 +229,7 @@ public:
static FCDECL2(void, ChangeWrapperHandleStrength, Object* orefUNSAFE, CLR_BOOL fIsWeak);
static FCDECL2(void, InitializeWrapperForWinRT, Object *unsafe_pThis, IUnknown **ppUnk);
static FCDECL2(void, InitializeManagedWinRTFactoryObject, Object *unsafe_pThis, ReflectClassBaseObject *unsafe_pType);
- static FCDECL1(Object *, MarshalNative::GetNativeActivationFactory, ReflectClassBaseObject *unsafe_pType);
+ static FCDECL1(Object *, GetNativeActivationFactory, ReflectClassBaseObject *unsafe_pType);
static void QCALLTYPE GetInspectableIIDs(QCall::ObjectHandleOnStack hobj, QCall::ObjectHandleOnStack retArrayGuids);
static void QCALLTYPE GetCachedWinRTTypes(QCall::ObjectHandleOnStack hadObj, int * epoch, QCall::ObjectHandleOnStack retArrayMT);
static void QCALLTYPE GetCachedWinRTTypeByIID(QCall::ObjectHandleOnStack hadObj, GUID iid, void * * ppMT);
diff --git a/src/vm/mdaassistants.cpp b/src/vm/mdaassistants.cpp
index cc598c0a6c..e52e8ff8ec 100644
--- a/src/vm/mdaassistants.cpp
+++ b/src/vm/mdaassistants.cpp
@@ -137,7 +137,7 @@ void TriggerGCForMDAInternal()
EX_TRY
{
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
//
@@ -868,7 +868,7 @@ LPVOID MdaInvalidOverlappedToPinvoke::CheckOverlappedPointer(UINT index, LPVOID
{
GCX_COOP();
- GCHeap *pHeap = GCHeap::GetGCHeap();
+ IGCHeap *pHeap = GCHeapUtilities::GetGCHeap();
fHeapPointer = pHeap->IsHeapPointer(pOverlapped);
}
diff --git a/src/vm/memberload.cpp b/src/vm/memberload.cpp
index 8b7b2ce69c..1b24300a68 100644
--- a/src/vm/memberload.cpp
+++ b/src/vm/memberload.cpp
@@ -30,7 +30,7 @@
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
diff --git a/src/vm/message.cpp b/src/vm/message.cpp
index fa0370dd33..093f9a2629 100644
--- a/src/vm/message.cpp
+++ b/src/vm/message.cpp
@@ -249,7 +249,7 @@ void CMessage::GetObjectFromStack(OBJECTREF* ppDest, PVOID val, const CorElement
_ASSERTE(ty.GetMethodTable()->IsValueType() || ty.GetMethodTable()->IsEnum());
- _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) ppDest) ||
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) ppDest) ||
!"(pDest) can not point to GC Heap");
MethodTable* pMT = ty.GetMethodTable();
diff --git a/src/vm/metasig.h b/src/vm/metasig.h
index 5bf2903eab..a8404615a1 100644
--- a/src/vm/metasig.h
+++ b/src/vm/metasig.h
@@ -302,7 +302,10 @@ DEFINE_METASIG(SM(Int_IntPtr_RetObj, i I, j))
DEFINE_METASIG(SM(IntPtr_IntPtr_Int_RetVoid, I I i, v))
DEFINE_METASIG_T(SM(Exception_RetInt, C(EXCEPTION), i))
+#ifdef FEATURE_REMOTING
DEFINE_METASIG_T(SM(ContextBoundObject_RetObj, C(CONTEXT_BOUND_OBJECT), j))
+#endif
+
DEFINE_METASIG_T(SM(PMS_PMS_RetInt, C(PERMISSION_SET) C(PERMISSION_SET), i))
DEFINE_METASIG(SM(IntPtr_RetVoid, I, v))
diff --git a/src/vm/method.cpp b/src/vm/method.cpp
index 70714b710d..7afe0e9de2 100644
--- a/src/vm/method.cpp
+++ b/src/vm/method.cpp
@@ -1078,7 +1078,7 @@ BOOL MethodDesc::IsVerifiable()
#endif // _VER_EE_VERIFICATION_ENABLED
}
- UnsafeJitFunction(this, pHeader, CORJIT_FLG_IMPORT_ONLY, 0);
+ UnsafeJitFunction(this, pHeader, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_IMPORT_ONLY));
_ASSERTE(IsVerified());
return (IsVerified() && (m_wFlags & mdcVerifiable));
@@ -3249,6 +3249,7 @@ bool MethodDesc::CanSkipDoPrestub (
return false;
}
+#ifdef FEATURE_CER
// Can't hard bind to a method which contains one or more Constrained Execution Region roots (we need to force the prestub to
// execute for such methods).
if (ContainsPrePreparableCerRoot(this))
@@ -3256,6 +3257,7 @@ bool MethodDesc::CanSkipDoPrestub (
*pReason = CORINFO_INDIRECT_CALL_CER;
return false;
}
+#endif // FEATURE_CER
// Check whether our methoddesc needs restore
if (NeedsRestore(GetAppDomain()->ToCompilationDomain()->GetTargetImage(), TRUE))
diff --git a/src/vm/method.hpp b/src/vm/method.hpp
index 3cdd794f08..499112d149 100644
--- a/src/vm/method.hpp
+++ b/src/vm/method.hpp
@@ -1649,7 +1649,7 @@ public:
PCODE DoPrestub(MethodTable *pDispatchingMT);
- PCODE MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, DWORD flags, DWORD flags2);
+ PCODE MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags);
VOID GetMethodInfo(SString &namespaceOrClassName, SString &methodName, SString &methodSignature);
VOID GetMethodInfoWithNewSig(SString &namespaceOrClassName, SString &methodName, SString &methodSignature);
@@ -3596,6 +3596,22 @@ inline BOOL MethodDesc::HasMethodInstantiation() const
return mcInstantiated == GetClassification() && AsInstantiatedMethodDesc()->IMD_HasMethodInstantiation();
}
+#if defined(FEATURE_GDBJIT)
+class CalledMethod
+{
+private:
+ MethodDesc * m_pMD;
+ void * m_CallAddr;
+ CalledMethod * m_pNext;
+public:
+ CalledMethod(MethodDesc *pMD, void * addr, CalledMethod * next) : m_pMD(pMD), m_CallAddr(addr), m_pNext(next) {}
+ ~CalledMethod() {}
+ MethodDesc * GetMethodDesc() { return m_pMD; }
+ void * GetCallAddr() { return m_CallAddr; }
+ CalledMethod * GetNext() { return m_pNext; }
+};
+#endif
+
#include "method.inl"
#endif // !_METHOD_H
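CalledMethod above is an intrusive singly-linked list with the newest call site at its head; a hypothetical consumer (the GDB JIT symbol emitter is the natural one) would walk it like this:

    // Sketch: enumerate the call sites recorded during compilation.
    for (CalledMethod* pCM = pCalledMethods; pCM != NULL; pCM = pCM->GetNext())
    {
        MethodDesc* pMD  = pCM->GetMethodDesc();
        void*       addr = pCM->GetCallAddr();
        // ... emit (pMD, addr) as a call-site symbol for the debugger ...
    }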
diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
index bf863826d4..52a2ce4d98 100644
--- a/src/vm/methodtable.cpp
+++ b/src/vm/methodtable.cpp
@@ -33,7 +33,7 @@
#include "log.h"
#include "fieldmarshaler.h"
#include "cgensys.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "security.h"
#include "dbginterface.h"
#include "comdelegate.h"
@@ -9804,11 +9804,11 @@ BOOL MethodTable::Validate()
}
DWORD dwLastVerifiedGCCnt = m_pWriteableData->m_dwLastVerifedGCCnt;
- // Here we used to assert that (dwLastVerifiedGCCnt <= GCHeap::GetGCHeap()->GetGcCount()) but
+ // Here we used to assert that (dwLastVerifiedGCCnt <= GCHeapUtilities::GetGCHeap()->GetGcCount()) but
// this is no longer true with background GC. Since the purpose of having
// m_dwLastVerifedGCCnt is just to verify the same method table once per GC,
// I am getting rid of the assert.
- if (g_pConfig->FastGCStressLevel () > 1 && dwLastVerifiedGCCnt == GCHeap::GetGCHeap()->GetGcCount())
+ if (g_pConfig->FastGCStressLevel () > 1 && dwLastVerifiedGCCnt == GCHeapUtilities::GetGCHeap()->GetGcCount())
return TRUE;
#endif //_DEBUG
@@ -9835,7 +9835,7 @@ BOOL MethodTable::Validate()
// It is not a fatal error to fail to update the counter. We will run slower and retry next time,
// but the system will function properly.
if (EnsureWritablePagesNoThrow(m_pWriteableData, sizeof(MethodTableWriteableData)))
- m_pWriteableData->m_dwLastVerifedGCCnt = GCHeap::GetGCHeap()->GetGcCount();
+ m_pWriteableData->m_dwLastVerifedGCCnt = GCHeapUtilities::GetGCHeap()->GetGcCount();
#endif //_DEBUG
return TRUE;
diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
index 70c0e3b8cb..682268eb39 100644
--- a/src/vm/methodtablebuilder.cpp
+++ b/src/vm/methodtablebuilder.cpp
@@ -191,6 +191,7 @@ MethodTableBuilder::CreateClass( Module *pModule,
pEEClass->GetSecurityProperties()->SetFlags(dwSecFlags, dwNullDeclFlags);
}
+#ifdef FEATURE_CER
// Cache class level reliability contract info.
DWORD dwReliabilityContract = ::GetReliabilityContract(pInternalImport, cl);
if (dwReliabilityContract != RC_NULL)
@@ -201,6 +202,7 @@ MethodTableBuilder::CreateClass( Module *pModule,
pEEClass->SetReliabilityContract(dwReliabilityContract);
}
+#endif // FEATURE_CER
if (fHasLayout)
pEEClass->SetHasLayout();
@@ -1218,7 +1220,7 @@ BOOL MethodTableBuilder::CheckIfSIMDAndUpdateSize()
{
STANDARD_VM_CONTRACT;
-#ifdef _TARGET_AMD64_
+#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
if (!GetAssembly()->IsSIMDVectorAssembly())
return false;
@@ -1245,8 +1247,8 @@ BOOL MethodTableBuilder::CheckIfSIMDAndUpdateSize()
EEJitManager *jitMgr = ExecutionManager::GetEEJitManager();
if (jitMgr->LoadJIT())
{
- DWORD cpuCompileFlags = jitMgr->GetCPUCompileFlags();
- if ((cpuCompileFlags & CORJIT_FLG_FEATURE_SIMD) != 0)
+ CORJIT_FLAGS cpuCompileFlags = jitMgr->GetCPUCompileFlags();
+ if (cpuCompileFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_FEATURE_SIMD))
{
unsigned intrinsicSIMDVectorLength = jitMgr->m_jit->getMaxIntrinsicSIMDVectorLength(cpuCompileFlags);
if (intrinsicSIMDVectorLength != 0)
@@ -1262,7 +1264,7 @@ BOOL MethodTableBuilder::CheckIfSIMDAndUpdateSize()
}
}
#endif // !CROSSGEN_COMPILE
-#endif // _TARGET_AMD64_
+#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
return false;
}
@@ -1860,6 +1862,11 @@ MethodTableBuilder::BuildMethodTableThrowing(
pMT->SetHasBoxedRegularStatics();
}
+ if (bmtFP->fIsByRefLikeType)
+ {
+ pMT->SetIsByRefLike();
+ }
+
if (IsValueClass())
{
if (bmtFP->NumInstanceFieldBytes != totalDeclaredFieldSize || HasOverLayedField())
@@ -4212,14 +4219,12 @@ VOID MethodTableBuilder::InitializeFieldDescs(FieldDesc *pFieldDescList,
goto GOT_ELEMENT_TYPE;
}
- // There are just few types with code:IsByRefLike set - see code:CheckForSystemTypes.
- // Note: None of them will ever have self-referencing static ValueType field (we cannot assert it now because the IsByRefLike
- // status for this type has not been initialized yet).
+ // Inherit IsByRefLike characteristic from fields
if (!IsSelfRef(pByValueClass) && pByValueClass->IsByRefLike())
- { // Cannot have embedded valuetypes that contain a field that require stack allocation.
- BuildMethodTableThrowException(COR_E_BADIMAGEFORMAT, IDS_CLASSLOAD_BAD_FIELD, mdTokenNil);
+ {
+ bmtFP->fIsByRefLikeType = true;
}
-
+
if (!IsSelfRef(pByValueClass) && pByValueClass->GetClass()->HasNonPublicFields())
{ // If a class has a field of type ValueType with non-public fields in it,
// the class must "inherit" this characteristic
@@ -10201,15 +10206,27 @@ void MethodTableBuilder::CheckForSystemTypes()
MethodTable * pMT = GetHalfBakedMethodTable();
EEClass * pClass = GetHalfBakedClass();
- // We can exit early for generic types - there is just one case to check for.
- if (g_pNullableClass != NULL && bmtGenerics->HasInstantiation())
+ // We can exit early for generic types - there are just a few cases to check for.
+ if (bmtGenerics->HasInstantiation() && g_pNullableClass != NULL)
{
+#ifdef FEATURE_SPAN_OF_T
+ _ASSERTE(g_pByReferenceClass != NULL);
+ _ASSERTE(g_pByReferenceClass->IsByRefLike());
+
+ if (GetCl() == g_pByReferenceClass->GetCl())
+ {
+ pMT->SetIsByRefLike();
+ return;
+ }
+#endif
+
_ASSERTE(g_pNullableClass->IsNullable());
// Pre-compute whether the class is a Nullable<T> so that code:Nullable::IsNullableType is efficient
// This is useful to the performance of boxing/unboxing a Nullable
if (GetCl() == g_pNullableClass->GetCl())
pMT->SetIsNullable();
+
return;
}
@@ -10257,6 +10274,12 @@ void MethodTableBuilder::CheckForSystemTypes()
{
pMT->SetIsNullable();
}
+#ifdef FEATURE_SPAN_OF_T
+ else if (strcmp(name, g_ByReferenceName) == 0)
+ {
+ pMT->SetIsByRefLike();
+ }
+#endif
else if (strcmp(name, g_ArgIteratorName) == 0)
{
// Mark the special types that have embedded stack pointers in them
diff --git a/src/vm/methodtablebuilder.h b/src/vm/methodtablebuilder.h
index 1e40ea996c..1cf71499e6 100644
--- a/src/vm/methodtablebuilder.h
+++ b/src/vm/methodtablebuilder.h
@@ -2081,6 +2081,7 @@ private:
DWORD NumGCPointerSeries;
DWORD NumInstanceFieldBytes;
+ bool fIsByRefLikeType;
bool fHasFixedAddressValueTypes;
bool fHasSelfReferencingStaticValueTypeField_WithRVA;
diff --git a/src/vm/microsoft.comservices_i.c b/src/vm/microsoft.comservices_i.c
index d70bc8cefe..f31a92f53e 100644
--- a/src/vm/microsoft.comservices_i.c
+++ b/src/vm/microsoft.comservices_i.c
@@ -23,14 +23,13 @@
#if !defined(_M_IA64) && !defined(_M_AXP64)
+#include <rpc.h>
+#include <rpcndr.h>
+
#ifdef __cplusplus
extern "C"{
#endif
-
-#include <rpc.h>
-#include <rpcndr.h>
-
#ifdef _MIDL_USE_GUIDDEF_
#ifndef INITGUID
@@ -109,14 +108,13 @@ MIDL_DEFINE_GUID(CLSID, CLSID_RegistrationHelperTx,0x89A86E7B,0xC229,0x4008,0x9B
#if defined(_M_IA64) || defined(_M_AXP64)
+#include <rpc.h>
+#include <rpcndr.h>
+
#ifdef __cplusplus
extern "C"{
#endif
-
-#include <rpc.h>
-#include <rpcndr.h>
-
#ifdef _MIDL_USE_GUIDDEF_
#ifndef INITGUID
@@ -153,7 +151,7 @@ typedef IID CLSID;
#define MIDL_DEFINE_GUID(type,name,l,w1,w2,b1,b2,b3,b4,b5,b6,b7,b8) \
const type name = {l,w1,w2,{b1,b2,b3,b4,b5,b6,b7,b8}}
-#endif !_MIDL_USE_GUIDDEF_
+#endif // !_MIDL_USE_GUIDDEF_
MIDL_DEFINE_GUID(IID, LIBID_Microsoft_ComServices,0xD7F68C66,0x3833,0x3832,0xB6,0xD0,0xB7,0x96,0xBB,0x7D,0x2D,0xFF);
diff --git a/src/vm/mlinfo.cpp b/src/vm/mlinfo.cpp
index ab2545296e..74bd536969 100644
--- a/src/vm/mlinfo.cpp
+++ b/src/vm/mlinfo.cpp
@@ -4865,6 +4865,8 @@ void ArrayMarshalInfo::InitForHiddenLengthArray(TypeHandle thElement)
{
STANDARD_VM_CONTRACT;
+ MethodTable *pMT = NULL;
+
// WinRT supports arrays of any WinRT-legal types
if (thElement.IsArray())
{
@@ -4877,7 +4879,7 @@ void ArrayMarshalInfo::InitForHiddenLengthArray(TypeHandle thElement)
m_thElement = thElement;
- MethodTable *pMT = thElement.GetMethodTable();
+ pMT = thElement.GetMethodTable();
if (pMT->IsString())
{
m_vtElement = VTHACK_HSTRING;
diff --git a/src/vm/mscorlib.h b/src/vm/mscorlib.h
index 291c6abd47..20d658b2e1 100644
--- a/src/vm/mscorlib.h
+++ b/src/vm/mscorlib.h
@@ -462,8 +462,9 @@ DEFINE_METHOD(CONTEXT, CALLBACK, DoCallBackFromEE,
DEFINE_METHOD(CONTEXT, RESERVE_SLOT, ReserveSlot, IM_RetInt)
#endif
+#ifdef FEATURE_REMOTING
DEFINE_CLASS(CONTEXT_BOUND_OBJECT, System, ContextBoundObject)
-
+#endif
#ifdef FEATURE_CRYPTO
DEFINE_CLASS(CSP_PARAMETERS, Cryptography, CspParameters)
@@ -592,9 +593,7 @@ DEFINE_FIELD_U(textInfo, CultureInfoBaseObject, textInfo)
DEFINE_FIELD_U(numInfo, CultureInfoBaseObject, numInfo)
DEFINE_FIELD_U(dateTimeInfo, CultureInfoBaseObject, dateTimeInfo)
DEFINE_FIELD_U(calendar, CultureInfoBaseObject, calendar)
-#ifndef FEATURE_CORECLR
DEFINE_FIELD_U(m_consoleFallbackCulture, CultureInfoBaseObject, m_consoleFallbackCulture)
-#endif // FEATURE_CORECLR
DEFINE_FIELD_U(m_name, CultureInfoBaseObject, m_name)
DEFINE_FIELD_U(m_nonSortName, CultureInfoBaseObject, m_nonSortName)
DEFINE_FIELD_U(m_sortName, CultureInfoBaseObject, m_sortName)
@@ -772,6 +771,7 @@ DEFINE_CLASS(I_RT_FIELD_INFO, System, IRuntimeFieldInfo)
DEFINE_CLASS(FIELD_INFO, Reflection, FieldInfo)
+#ifndef FEATURE_CORECLR
DEFINE_CLASS_U(IO, FileStreamAsyncResult, AsyncResultBase)
DEFINE_FIELD_U(_userCallback, AsyncResultBase, _userCallback)
DEFINE_FIELD_U(_userStateObject, AsyncResultBase, _userStateObject)
@@ -786,6 +786,7 @@ DEFINE_FIELD_U(_isWrite, AsyncResultBase, _isWrite)
DEFINE_FIELD_U(_isComplete, AsyncResultBase, _isComplete)
DEFINE_FIELD_U(_completedSynchronously, AsyncResultBase, _completedSynchronously)
DEFINE_CLASS(FILESTREAM_ASYNCRESULT, IO, FileStreamAsyncResult)
+#endif // !FEATURE_CORECLR
DEFINE_CLASS_U(Security, FrameSecurityDescriptor, FrameSecurityDescriptorBaseObject)
DEFINE_FIELD_U(m_assertions, FrameSecurityDescriptorBaseObject, m_assertions)
@@ -1051,6 +1052,12 @@ DEFINE_FIELD(NULL, VALUE, Value)
DEFINE_CLASS(NULLABLE, System, Nullable`1)
+#ifdef FEATURE_SPAN_OF_T
+DEFINE_CLASS(BYREFERENCE, System, ByReference`1)
+DEFINE_CLASS(SPAN, System, Span`1)
+DEFINE_CLASS(READONLY_SPAN, System, ReadOnlySpan`1)
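+// ByReference`1, Span`1 and ReadOnlySpan`1 are byref-like (stack-only) types;
+// byref-like-ness is tracked by the new fIsByRefLikeType flag in methodtablebuilder.h.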
+#endif
+
// Keep this in sync with System.Globalization.NumberFormatInfo
DEFINE_CLASS_U(Globalization, NumberFormatInfo, NumberFormatInfo)
DEFINE_FIELD_U(numberGroupSizes, NumberFormatInfo, cNumberGroup)
@@ -1085,9 +1092,7 @@ DEFINE_FIELD_U(numberNegativePattern, NumberFormatInfo, cNegativeNumberFormat
DEFINE_FIELD_U(percentPositivePattern, NumberFormatInfo, cPositivePercentFormat)
DEFINE_FIELD_U(percentNegativePattern, NumberFormatInfo, cNegativePercentFormat)
DEFINE_FIELD_U(percentDecimalDigits, NumberFormatInfo, cPercentDecimals)
-#ifndef FEATURE_COREFX_GLOBALIZATION
DEFINE_FIELD_U(digitSubstitution, NumberFormatInfo, iDigitSubstitution)
-#endif
DEFINE_FIELD_U(isReadOnly, NumberFormatInfo, bIsReadOnly)
#ifndef FEATURE_COREFX_GLOBALIZATION
DEFINE_FIELD_U(m_useUserOverride, NumberFormatInfo, bUseUserOverride)
@@ -1338,6 +1343,20 @@ DEFINE_METHOD(JIT_HELPERS, UNSAFE_ENUM_CAST, UnsafeEnumCast, NoSi
DEFINE_METHOD(JIT_HELPERS, UNSAFE_ENUM_CAST_LONG, UnsafeEnumCastLong, NoSig)
DEFINE_METHOD(JIT_HELPERS, UNSAFE_CAST_TO_STACKPTR,UnsafeCastToStackPointer, NoSig)
#endif // _DEBUG
+#ifdef FEATURE_SPAN_OF_T
+DEFINE_METHOD(JIT_HELPERS, BYREF_LESSTHAN, ByRefLessThan, NoSig)
+DEFINE_METHOD(JIT_HELPERS, GET_ARRAY_DATA, GetArrayData, NoSig)
+DEFINE_METHOD(JIT_HELPERS, CONTAINSREFERENCES, ContainsReferences, NoSig)
+#endif
+
+#ifdef FEATURE_SPAN_OF_T
+DEFINE_CLASS(UNSAFE, CompilerServices, Unsafe)
+DEFINE_METHOD(UNSAFE, AS_POINTER, AsPointer, NoSig)
+DEFINE_METHOD(UNSAFE, SIZEOF, SizeOf, NoSig)
+DEFINE_METHOD(UNSAFE, BYREF_AS, As, NoSig)
+DEFINE_METHOD(UNSAFE, BYREF_ADD, Add, NoSig)
+DEFINE_METHOD(UNSAFE, BYREF_ARE_SAME, AreSame, NoSig)
+#endif
DEFINE_CLASS(INTERLOCKED, Threading, Interlocked)
DEFINE_METHOD(INTERLOCKED, COMPARE_EXCHANGE_T, CompareExchange, GM_RefT_T_T_RetT)
@@ -1346,6 +1365,11 @@ DEFINE_METHOD(INTERLOCKED, COMPARE_EXCHANGE_OBJECT,CompareExchange, SM_
DEFINE_CLASS(PINNING_HELPER, CompilerServices, PinningHelper)
DEFINE_FIELD(PINNING_HELPER, M_DATA, m_data)
+#ifdef FEATURE_SPAN_OF_T
+DEFINE_CLASS(ARRAY_PINNING_HELPER, CompilerServices, ArrayPinningHelper)
+DEFINE_FIELD(ARRAY_PINNING_HELPER, M_ARRAY_DATA, m_arrayData)
+#endif
+
DEFINE_CLASS(RUNTIME_WRAPPED_EXCEPTION, CompilerServices, RuntimeWrappedException)
DEFINE_METHOD(RUNTIME_WRAPPED_EXCEPTION, OBJ_CTOR, .ctor, IM_Obj_RetVoid)
DEFINE_FIELD(RUNTIME_WRAPPED_EXCEPTION, WRAPPED_EXCEPTION, m_wrappedException)
@@ -1370,9 +1394,7 @@ DEFINE_CLASS(SAFE_PEFILE_HANDLE, SafeHandles, SafePEFileHandle)
DEFINE_CLASS(SAFE_TOKENHANDLE, SafeHandles, SafeAccessTokenHandle)
#endif
-#ifndef FEATURE_CORECLR
DEFINE_CLASS(SAFE_TYPENAMEPARSER_HANDLE, System, SafeTypeNameParserHandle)
-#endif //!FEATURE_CORECLR
#ifdef FEATURE_COMPRESSEDSTACK
DEFINE_CLASS(SAFE_CSHANDLE, Threading, SafeCompressedStackHandle)
diff --git a/src/vm/multicorejitplayer.cpp b/src/vm/multicorejitplayer.cpp
index 0c69fdcf94..7d13bbc462 100644
--- a/src/vm/multicorejitplayer.cpp
+++ b/src/vm/multicorejitplayer.cpp
@@ -556,7 +556,7 @@ bool MulticoreJitProfilePlayer::CompileMethodDesc(Module * pModule, MethodDesc *
#endif
// MakeJitWorker calls back to MulticoreJitCodeStorage::StoreMethodCode under MethodDesc lock
- pMD->MakeJitWorker(& header, CORJIT_FLG_MCJIT_BACKGROUND, 0);
+ pMD->MakeJitWorker(& header, CORJIT_FLAGS(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND));
return true;
}
diff --git a/src/vm/nativeoverlapped.cpp b/src/vm/nativeoverlapped.cpp
index 2e253f3046..d0afbb648a 100644
--- a/src/vm/nativeoverlapped.cpp
+++ b/src/vm/nativeoverlapped.cpp
@@ -35,14 +35,14 @@ FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode,
{
FCALL_CONTRACT;
-#ifndef FEATURE_PAL
+#ifndef FEATURE_PAL
Thread *pThread = GetThread();
DWORD adid = pThread->GetDomain()->GetId().m_dwId;
size_t key=0;
- _ASSERTE(pThread);
+ _ASSERTE(pThread);
- //Poll and wait if GC is in progress, to avoid blocking GC for too long.
+ //Poll and wait if GC is in progress, to avoid blocking GC for too long.
FC_GC_POLL();
*lpOverlapped = ThreadpoolMgr::CompletionPortDispatchWorkWithinAppDomain(pThread, errorCode, numBytes, &key, adid);
@@ -59,13 +59,15 @@ FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode,
if(overlapped->m_iocb == NULL)
{
// no user delegate to callback
- _ASSERTE((overlapped->m_iocbHelper == NULL) || !"This is benign, but should be optimized");
+ _ASSERTE((overlapped->m_iocbHelper == NULL) || !"This is benign, but should be optimized");
+#ifndef FEATURE_CORECLR
if (g_pAsyncFileStream_AsyncResultClass)
{
SetAsyncResultProperties(overlapped, *errorCode, *numBytes);
- }
- else
+ }
+ else
+#endif // !FEATURE_CORECLR
{
//We're not initialized yet, go back to the Vm, and process the packet there.
ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);
@@ -75,7 +77,7 @@ FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode,
return;
}
else
- {
+ {
if(!pThread->IsRealThreadPoolResetNeeded())
{
pThread->ResetManagedThreadObjectInCoopMode(ThreadNative::PRIORITY_NORMAL);
@@ -84,7 +86,7 @@ FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode,
{
//We may have to create a CP thread, go back to the Vm, and process the packet there.
ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);
- *lpOverlapped = NULL;
+ *lpOverlapped = NULL;
}
}
else
@@ -93,7 +95,7 @@ FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode,
//and process the packet there.
ThreadpoolMgr::StoreOverlappedInfoInThread(pThread, *errorCode, *numBytes, key, *lpOverlapped);
- *lpOverlapped = NULL;
+ *lpOverlapped = NULL;
}
}
@@ -105,8 +107,8 @@ FCIMPL3(void, CheckVMForIOPacket, LPOVERLAPPED* lpOverlapped, DWORD* errorCode,
*lpOverlapped = NULL;
#endif // !FEATURE_PAL
- return;
-}
+ return;
+}
FCIMPLEND
FCIMPL1(void*, AllocateNativeOverlapped, OverlappedDataObject* overlappedUNSAFE)
diff --git a/src/vm/nativeoverlapped.h b/src/vm/nativeoverlapped.h
index 854090c35f..0c0693dca6 100644
--- a/src/vm/nativeoverlapped.h
+++ b/src/vm/nativeoverlapped.h
@@ -22,6 +22,7 @@ class OverlappedDataObject : public Object
{
public:
ASYNCRESULTREF m_asyncResult;
+
OBJECTREF m_iocb;
OBJECTREF m_iocbHelper;
OBJECTREF m_overlapped;
@@ -62,7 +63,7 @@ public:
STATIC_CONTRACT_SO_TOLERANT;
_ASSERTE (nativeOverlapped != NULL);
- _ASSERTE (GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) nativeOverlapped));
+ _ASSERTE (GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) nativeOverlapped));
return (OverlappedDataObject*)((BYTE*)nativeOverlapped - offsetof(OverlappedDataObject, Internal));
}
diff --git a/src/vm/object.cpp b/src/vm/object.cpp
index 7c47e26627..531c0015f2 100644
--- a/src/vm/object.cpp
+++ b/src/vm/object.cpp
@@ -17,7 +17,7 @@
#include "threads.h"
#include "excep.h"
#include "eeconfig.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#ifdef FEATURE_REMOTING
#include "remoting.h"
#endif
@@ -243,7 +243,7 @@ TypeHandle Object::GetGCSafeTypeHandleIfPossible() const
//
// where MyRefType2's module was unloaded by the time the GC occurred. In at least
// one case, the GC was caused by the AD unload itself (AppDomain::Unload ->
- // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeap::GarbageCollect).
+ // AppDomain::Exit -> GCInterface::AddMemoryPressure -> WKS::GCHeapUtilities::GarbageCollect).
//
// To protect against all scenarios, verify that
//
@@ -1764,9 +1764,9 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
BOOL bSmallObjectHeapPtr = FALSE, bLargeObjectHeapPtr = FALSE;
if (!noRangeChecks)
{
- bSmallObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this, TRUE);
+ bSmallObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this, TRUE);
if (!bSmallObjectHeapPtr)
- bLargeObjectHeapPtr = GCHeap::GetGCHeap()->IsHeapPointer(this);
+ bLargeObjectHeapPtr = GCHeapUtilities::GetGCHeap()->IsHeapPointer(this);
CHECK_AND_TEAR_DOWN(bSmallObjectHeapPtr || bLargeObjectHeapPtr);
}
@@ -1781,7 +1781,7 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
lastTest = 4;
if (bDeep && (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_GC)) {
- GCHeap::GetGCHeap()->ValidateObjectMember(this);
+ GCHeapUtilities::GetGCHeap()->ValidateObjectMember(this);
}
lastTest = 5;
@@ -1790,7 +1790,7 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
// we skip checking noRangeChecks since if skipping
// is enabled bSmallObjectHeapPtr will always be false.
if (bSmallObjectHeapPtr) {
- CHECK_AND_TEAR_DOWN(!GCHeap::GetGCHeap()->IsObjectInFixedHeap(this));
+ CHECK_AND_TEAR_DOWN(!GCHeapUtilities::GetGCHeap()->IsObjectInFixedHeap(this));
}
lastTest = 6;
@@ -1815,9 +1815,9 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
&& bVerifyNextHeader
&& GCScan::GetGcRuntimeStructuresValid ()
//NextObj could be very slow if concurrent GC is going on
- && !(GCHeap::IsGCHeapInitialized() && GCHeap::GetGCHeap ()->IsConcurrentGCInProgress ()))
+ && !(GCHeapUtilities::IsGCHeapInitialized() && GCHeapUtilities::GetGCHeap ()->IsConcurrentGCInProgress ()))
{
- Object * nextObj = GCHeap::GetGCHeap ()->NextObj (this);
+ Object * nextObj = GCHeapUtilities::GetGCHeap ()->NextObj (this);
if ((nextObj != NULL) &&
(nextObj->GetGCSafeMethodTable() != g_pFreeObjectMethodTable))
{
@@ -1949,7 +1949,7 @@ STRINGREF StringObject::NewString(const WCHAR *pwsz)
// pinning and then later put into a struct and that struct is
// then marshalled to managed.
//
- _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
!"pwsz can not point to GC Heap");
#endif // 0
@@ -1988,7 +1988,7 @@ STRINGREF StringObject::NewString(const WCHAR *pwsz, int length) {
// pinning and then later put into a struct and that struct is
// then marshalled to managed.
//
- _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) pwsz) ||
!"pwsz can not point to GC Heap");
#endif // 0
STRINGREF pString = AllocateString(length);
@@ -2121,9 +2121,11 @@ STRINGREF __stdcall StringObject::StringInitCharHelper(LPCSTR pszSource, int len
if (!pszSource || length == 0) {
return StringObject::GetEmptyString();
}
+#ifndef FEATURE_PAL
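+ // Small integer values are Windows ATOMs rather than real string pointers
+ // (hence Arg_MustBeStringPtrNotAtom); ATOMs do not exist on other platforms,
+ // so the check is compiled out there.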
else if ((size_t)pszSource < 64000) {
COMPlusThrow(kArgumentException, W("Arg_MustBeStringPtrNotAtom"));
}
+#endif // !FEATURE_PAL
// Make sure we can read from the pointer.
// This is better than try to read from the pointer and catch the access violation exceptions.
@@ -2664,7 +2666,7 @@ OBJECTREF::OBJECTREF(const OBJECTREF & objref)
// !!! Either way you need to fix the code.
_ASSERTE(Thread::IsObjRefValid(&objref));
if ((objref.m_asObj != 0) &&
- ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
{
_ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
}
@@ -2718,7 +2720,7 @@ OBJECTREF::OBJECTREF(Object *pObject)
DEBUG_ONLY_FUNCTION;
if ((pObject != 0) &&
- ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
{
_ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
}
@@ -2901,7 +2903,7 @@ OBJECTREF& OBJECTREF::operator=(const OBJECTREF &objref)
_ASSERTE(Thread::IsObjRefValid(&objref));
if ((objref.m_asObj != 0) &&
- ((GCHeap*)GCHeap::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
+ ((IGCHeap*)GCHeapUtilities::GetGCHeap())->IsHeapPointer( (BYTE*)this ))
{
_ASSERTE(!"Write Barrier violation. Must use SetObjectReference() to assign OBJECTREF's into the GC heap!");
}
@@ -2948,14 +2950,14 @@ void* __cdecl GCSafeMemCpy(void * dest, const void * src, size_t len)
{
Thread* pThread = GetThread();
- // GCHeap::IsHeapPointer has race when called in preemptive mode. It walks the list of segments
+ // GCHeapUtilities::IsHeapPointer has race when called in preemptive mode. It walks the list of segments
// that can be modified by GC. Do the check below only if it is safe to do so.
if (pThread != NULL && pThread->PreemptiveGCDisabled())
{
// Note there is memcpyNoGCRefs which will allow you to do a memcpy into the GC
// heap if you really know you don't need to call the write barrier
- _ASSERTE(!GCHeap::GetGCHeap()->IsHeapPointer((BYTE *) dest) ||
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsHeapPointer((BYTE *) dest) ||
!"using memcpy to copy into the GC heap, use CopyValueClass");
}
}
diff --git a/src/vm/object.h b/src/vm/object.h
index 37560cf53c..73ecb62b80 100644
--- a/src/vm/object.h
+++ b/src/vm/object.h
@@ -1856,9 +1856,7 @@ private:
OBJECTREF dateTimeInfo;
OBJECTREF calendar;
OBJECTREF m_cultureData;
-#ifndef FEATURE_CORECLR
OBJECTREF m_consoleFallbackCulture;
-#endif // !FEATURE_CORECLR
STRINGREF m_name; // "real" name - en-US, de-DE_phoneb or fj-FJ
STRINGREF m_nonSortName; // name w/o sort info (de-DE for de-DE_phoneb)
STRINGREF m_sortName; // Sort only name (de-DE_phoneb, en-us for fj-fj (w/us sort)
@@ -2194,15 +2192,28 @@ public:
}
#endif // FEATURE_LEAK_CULTURE_INFO
-#ifndef FEATURE_CORECLR
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+#ifdef FEATURE_CORECLR
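+ // On CoreCLR the managed thread object stores its SynchronizationContext
+ // directly; desktop reaches it through the ExecutionContext (see below).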
OBJECTREF GetSynchronizationContext()
{
- LIMITED_METHOD_CONTRACT;
+ LIMITED_METHOD_CONTRACT;
+ return m_SynchronizationContext;
+ }
+#else // !FEATURE_CORECLR
+ OBJECTREF GetSynchronizationContext()
+ {
+ LIMITED_METHOD_CONTRACT;
if (m_ExecutionContext != NULL)
+ {
return m_ExecutionContext->GetSynchronizationContext();
+ }
return NULL;
}
- OBJECTREF GetExecutionContext()
+#endif // FEATURE_CORECLR
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+
+#ifndef FEATURE_CORECLR
+ OBJECTREF GetExecutionContext()
{
LIMITED_METHOD_CONTRACT;
return (OBJECTREF)m_ExecutionContext;
@@ -2869,8 +2880,10 @@ class FrameSecurityDescriptorBaseObject : public Object
LIMITED_METHOD_CONTRACT;
m_declSecComputed = !!declSec;
}
+#ifndef FEATURE_PAL
LPVOID GetCallerToken();
LPVOID GetImpersonationToken();
+#endif // !FEATURE_PAL
};
#ifdef FEATURE_COMPRESSEDSTACK
@@ -4563,9 +4576,7 @@ public:
INT32 cPositivePercentFormat; // positivePercentFormat
INT32 cNegativePercentFormat; // negativePercentFormat
INT32 cPercentDecimals; // percentDecimalDigits
-#ifndef FEATURE_COREFX_GLOBALIZATION
INT32 iDigitSubstitution; // digitSubstitution
-#endif
CLR_BOOL bIsReadOnly; // Is this NumberFormatInfo ReadOnly?
#ifndef FEATURE_COREFX_GLOBALIZATION
diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
index 1f2e8c748c..e0d4096347 100644
--- a/src/vm/prestub.cpp
+++ b/src/vm/prestub.cpp
@@ -228,7 +228,7 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc)
// Are we listed?
USHORT jnt = jn.Requested((TADDR) modulePtr, t);
if (jnt & CLRDATA_METHNOTIFY_GENERATED)
- {
+ {
// If so, throw an exception!
#endif
DACNotify::DoJITNotification(methodDesc);
@@ -256,7 +256,7 @@ void DACNotifyCompilationFinished(MethodDesc *methodDesc)
// which prevents us from trying to JIT the same method more that once.
-PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, DWORD flags, DWORD flags2)
+PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, CORJIT_FLAGS flags)
{
STANDARD_VM_CONTRACT;
@@ -280,7 +280,7 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, DWORD flags, DWO
#ifdef FEATURE_MULTICOREJIT
MulticoreJitManager & mcJitManager = GetAppDomain()->GetMulticoreJitManager();
- bool fBackgroundThread = (flags & CORJIT_FLG_MCJIT_BACKGROUND) != 0;
+ bool fBackgroundThread = flags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND);
#endif
{
@@ -424,22 +424,30 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, DWORD flags, DWO
{
BEGIN_PIN_PROFILER(CORProfilerTrackJITInfo());
+#ifdef FEATURE_MULTICOREJIT
// Multicore JIT should be disabled when CORProfilerTrackJITInfo is on
// But there could be corner case in which profiler is attached when multicore background thread is calling MakeJitWorker
// Disable this block when calling from multicore JIT background thread
- if (!IsNoMetadata()
-#ifdef FEATURE_MULTICOREJIT
-
- && (! fBackgroundThread)
+ if (!fBackgroundThread)
#endif
- )
{
- g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID) this, TRUE);
- // The profiler may have changed the code on the callback. Need to
- // pick up the new code. Note that you have to be fully trusted in
- // this mode and the code will not be verified.
- COR_ILMETHOD *pilHeader = GetILHeader(TRUE);
- new (ILHeader) COR_ILMETHOD_DECODER(pilHeader, GetMDImport(), NULL);
+ if (!IsNoMetadata())
+ {
+ g_profControlBlock.pProfInterface->JITCompilationStarted((FunctionID) this, TRUE);
+ // The profiler may have changed the code on the callback. Need to
+ // pick up the new code. Note that you have to be fully trusted in
+ // this mode and the code will not be verified.
+ COR_ILMETHOD *pilHeader = GetILHeader(TRUE);
+ new (ILHeader) COR_ILMETHOD_DECODER(pilHeader, GetMDImport(), NULL);
+ }
+ else
+ {
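+ // Metadata-less (IL stub / LCG) methods get the dynamic-method variant of
+ // the event, along with a pointer to their IL so the profiler can inspect it.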
+ unsigned int ilSize, unused;
+ CorInfoOptions corOptions;
+ LPCBYTE ilHeaderPointer = this->AsDynamicMethodDesc()->GetResolver()->GetCodeInfo(&ilSize, &unused, &corOptions, &unused);
+
+ g_profControlBlock.pProfInterface->DynamicMethodJITCompilationStarted((FunctionID) this, TRUE, ilHeaderPointer, ilSize);
+ }
}
END_PIN_PROFILER();
}
@@ -457,13 +465,13 @@ PCODE MethodDesc::MakeJitWorker(COR_ILMETHOD_DECODER* ILHeader, DWORD flags, DWO
if (!fBackgroundThread)
#endif // FEATURE_MULTICOREJIT
{
- StackSampler::RecordJittingInfo(this, flags, flags2);
+ StackSampler::RecordJittingInfo(this, flags);
}
#endif // FEATURE_STACK_SAMPLING
EX_TRY
{
- pCode = UnsafeJitFunction(this, ILHeader, flags, flags2, &sizeOfCode);
+ pCode = UnsafeJitFunction(this, ILHeader, flags, &sizeOfCode);
}
EX_CATCH
{
@@ -593,6 +601,10 @@ GotNewCode:
pEntry->m_hrResultCode,
TRUE);
}
+ else
+ {
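+ // Likewise, metadata-less methods report completion through the
+ // dynamic-method variant of the finished event.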
+ g_profControlBlock.pProfInterface->DynamicMethodJITCompilationFinished((FunctionID) this, pEntry->m_hrResultCode, TRUE);
+ }
END_PIN_PROFILER();
}
#endif // PROFILING_SUPPORTED
@@ -1179,7 +1191,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
if (g_pConfig->ShouldPrestubGC(this))
{
GCX_COOP();
- GCHeap::GetGCHeap()->GarbageCollect(-1);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(-1);
}
#endif // _DEBUG
@@ -1203,12 +1215,12 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
RETURN GetStableEntryPoint();
}
-#ifdef FEATURE_PREJIT
+#if defined(FEATURE_PREJIT) && defined(FEATURE_CER)
// If this method is the root of a CER call graph and we've recorded this fact in the ngen image then we're in the prestub in
// order to trip any runtime level preparation needed for this graph (P/Invoke stub generation/library binding, generic
// dictionary prepopulation etc.).
GetModule()->RestoreCer(this);
-#endif // FEATURE_PREJIT
+#endif // FEATURE_PREJIT && FEATURE_CER
#ifdef FEATURE_COMINTEROP
/************************** INTEROP *************************/
@@ -1455,7 +1467,7 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
// Mark the code as hot in case the method ends up in the native image
g_IBCLogger.LogMethodCodeAccess(this);
- pCode = MakeJitWorker(pHeader, 0, 0);
+ pCode = MakeJitWorker(pHeader, CORJIT_FLAGS());
#ifdef FEATURE_INTERPRETER
if ((pCode != NULL) && !HasStableEntryPoint())
@@ -1635,9 +1647,9 @@ PCODE MethodDesc::DoPrestub(MethodTable *pDispatchingMT)
// use the prestub.
//==========================================================================
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
static PCODE g_UMThunkPreStub;
-#endif
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
#ifndef DACCESS_COMPILE
@@ -1664,9 +1676,9 @@ void InitPreStubManager(void)
return;
}
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
g_UMThunkPreStub = GenerateUMThunkPrestub()->GetEntryPoint();
-#endif
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
ThePreStubManager::Init();
}
@@ -1675,11 +1687,11 @@ PCODE TheUMThunkPreStub()
{
LIMITED_METHOD_CONTRACT;
-#ifdef _TARGET_X86_
+#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
return g_UMThunkPreStub;
-#else
+#else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
return GetEEFuncEntryPoint(TheUMEntryPrestub);
-#endif
+#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL
}
PCODE TheVarargNDirectStub(BOOL hasRetBuffArg)
diff --git a/src/vm/profattach.cpp b/src/vm/profattach.cpp
index f03db361f0..5b10e8f10e 100644
--- a/src/vm/profattach.cpp
+++ b/src/vm/profattach.cpp
@@ -806,7 +806,7 @@ void ProfilingAPIAttachDetach::InitializeAttachThreadingMode()
// Environment variable trumps all, so check it first
DWORD dwAlwaysOn = g_pConfig->GetConfigDWORD_DontUse_(
CLRConfig::EXTERNAL_AttachThreadAlwaysOn,
- GCHeap::IsServerHeap() ? 1 : 0); // Default depends on GC server mode
+ GCHeapUtilities::IsServerHeap() ? 1 : 0); // Default depends on GC server mode
if (dwAlwaysOn == 0)
{
diff --git a/src/vm/profilinghelper.cpp b/src/vm/profilinghelper.cpp
index 139ba89ec0..1dd60b47e1 100644
--- a/src/vm/profilinghelper.cpp
+++ b/src/vm/profilinghelper.cpp
@@ -1413,7 +1413,7 @@ void ProfilingAPIUtility::TerminateProfiling()
{
// We know for sure GC has been fully initialized as we've turned off concurrent GC before
_ASSERTE(IsGarbageCollectorFullyInitialized());
- GCHeap::GetGCHeap()->TemporaryEnableConcurrentGC();
+ GCHeapUtilities::GetGCHeap()->TemporaryEnableConcurrentGC();
g_profControlBlock.fConcurrentGCDisabledForAttach = FALSE;
}
diff --git a/src/vm/proftoeeinterfaceimpl.cpp b/src/vm/proftoeeinterfaceimpl.cpp
index 551b38631a..1aee26dde3 100644
--- a/src/vm/proftoeeinterfaceimpl.cpp
+++ b/src/vm/proftoeeinterfaceimpl.cpp
@@ -585,6 +585,10 @@ COM_METHOD ProfToEEInterfaceImpl::QueryInterface(REFIID id, void ** pInterface)
{
*pInterface = static_cast<ICorProfilerInfo7 *>(this);
}
+ else if (id == IID_ICorProfilerInfo8)
+ {
+ *pInterface = static_cast<ICorProfilerInfo8 *>(this);
+ }
else if (id == IID_IUnknown)
{
*pInterface = static_cast<IUnknown *>(static_cast<ICorProfilerInfo *>(this));
@@ -754,7 +758,7 @@ struct GenerationTable
//---------------------------------------------------------------------------------------
//
-// This is a callback used by the GC when we call GCHeap::DescrGenerationsToProfiler
+// This is a callback used by the GC when we call GCHeapUtilities::DiagDescrGenerations
// (from UpdateGenerationBounds() below). The GC gives us generation information through
// this callback, which we use to update the GenerationDesc in the corresponding
// GenerationTable
@@ -874,8 +878,8 @@ void __stdcall UpdateGenerationBounds()
#endif
// fill in the values by calling back into the gc, which will report
// the ranges by calling GenWalkFunc for each one
- GCHeap *hp = GCHeap::GetGCHeap();
- hp->DescrGenerationsToProfiler(GenWalkFunc, newGenerationTable);
+ IGCHeap *hp = GCHeapUtilities::GetGCHeap();
+ hp->DiagDescrGenerations(GenWalkFunc, newGenerationTable);
// remember the old table and plug in the new one
GenerationTable *oldGenerationTable = s_currentGenerationTable;
@@ -1018,7 +1022,7 @@ ClassID SafeGetClassIDFromObject(Object * pObj)
//---------------------------------------------------------------------------------------
//
-// Callback of type walk_fn used by GCHeap::WalkObject. Keeps a count of each
+// Callback of type walk_fn used by GCHeapUtilities::DiagWalkObject. Keeps a count of each
// object reference found.
//
// Arguments:
@@ -1040,7 +1044,7 @@ BOOL CountContainedObjectRef(Object * pBO, void * context)
//---------------------------------------------------------------------------------------
//
-// Callback of type walk_fn used by GCHeap::WalkObject. Stores each object reference
+// Callback of type walk_fn used by GCHeapUtilities::DiagWalkObject. Stores each object reference
// encountered into an array.
//
// Arguments:
@@ -1113,7 +1117,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext)
if (pMT->ContainsPointersOrCollectible())
{
// First round through calculates the number of object refs for this class
- GCHeap::GetGCHeap()->WalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);
+ GCHeapUtilities::GetGCHeap()->DiagWalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);
if (cNumRefs > 0)
{
@@ -1138,7 +1142,7 @@ BOOL HeapWalkHelper(Object * pBO, void * pvContext)
// Second round saves off all of the ref values
OBJECTREF * pCurObjRef = arrObjRef;
- GCHeap::GetGCHeap()->WalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
+ GCHeapUtilities::GetGCHeap()->DiagWalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
}
}
@@ -1959,7 +1963,7 @@ HRESULT GetFunctionInfoInternal(LPCBYTE ip, EECodeInfo * pCodeInfo)
}
-HRESULT GetFunctionFromIPInternal(LPCBYTE ip, EECodeInfo * pCodeInfo)
+HRESULT GetFunctionFromIPInternal(LPCBYTE ip, EECodeInfo * pCodeInfo, BOOL failOnNoMetadata)
{
CONTRACTL
{
@@ -1979,11 +1983,14 @@ HRESULT GetFunctionFromIPInternal(LPCBYTE ip, EECodeInfo * pCodeInfo)
{
return hr;
}
-
- // never return a method that the user of the profiler API cannot use
- if (pCodeInfo->GetMethodDesc()->IsNoMetadata())
+
+ if (failOnNoMetadata)
{
- return E_FAIL;
+ // never return a method that the user of the profiler API cannot use
+ if (pCodeInfo->GetMethodDesc()->IsNoMetadata())
+ {
+ return E_FAIL;
+ }
}
return S_OK;
@@ -2043,7 +2050,7 @@ HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP(LPCBYTE ip, FunctionID * pFunct
EECodeInfo codeInfo;
- hr = GetFunctionFromIPInternal(ip, &codeInfo);
+ hr = GetFunctionFromIPInternal(ip, &codeInfo, /* failOnNoMetadata */ TRUE);
if (FAILED(hr))
{
return hr;
@@ -2096,7 +2103,7 @@ HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP2(LPCBYTE ip, FunctionID * pFunc
EECodeInfo codeInfo;
- hr = GetFunctionFromIPInternal(ip, &codeInfo);
+ hr = GetFunctionFromIPInternal(ip, &codeInfo, /* failOnNoMetadata */ TRUE);
if (FAILED(hr))
{
return hr;
@@ -4122,7 +4129,6 @@ DWORD ProfToEEInterfaceImpl::GetModuleFlags(Module * pModule)
}
#endif
// Not NGEN or ReadyToRun.
-
if (pPEFile->HasOpenedILimage())
{
PEImage * pILImage = pPEFile->GetOpenedILimage();
@@ -6051,7 +6057,7 @@ HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks3WithInfo(FunctionEnter
// The profiler must call SetEnterLeaveFunctionHooks3WithInfo during initialization, since
// the enter/leave events are immutable and must also be set during initialization.
- PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
+ PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
LL_INFO10,
"**PROF: SetEnterLeaveFunctionHooks3WithInfo 0x%p, 0x%p, 0x%p.\n",
pFuncEnter3WithInfo,
@@ -6373,6 +6379,294 @@ HRESULT ProfToEEInterfaceImpl::GetFunctionInfo2(FunctionID funcId,
}
/*
+* IsFunctionDynamic
+*
+* This function takes a functionId that may be of a metadata-less method like an IL Stub
+* or LCG method and returns TRUE through isDynamic if it is indeed a metadata-less
+* method.
+*
+* Parameters:
+* functionId - The function that is being requested.
+* isDynamic - An optional parameter for returning whether the function is metadata-less.
+*
+* Returns:
+* S_OK if successful.
+*/
+HRESULT ProfToEEInterfaceImpl::IsFunctionDynamic(FunctionID functionId, BOOL *isDynamic)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+
+ // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation eventually
+ // reads metadata which causes us to take a reader lock. However, see
+ // code:#DisableLockOnAsyncCalls
+ DISABLED(CAN_TAKE_LOCK);
+
+ // Asynchronous functions can be called at arbitrary times when runtime
+ // is holding locks that cannot be reentered without causing deadlock.
+ // This contract detects any attempts to reenter locks held at the time
+ // this function was called.
+ CANNOT_RETAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(isDynamic, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: IsFunctionDynamic 0x%p.\n",
+ functionId));
+
+ //
+ // Verify parameters.
+ //
+
+ if (functionId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ MethodDesc *pMethDesc = FunctionIdToMethodDesc(functionId);
+
+ if (pMethDesc == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ // It's not safe to examine a MethodDesc that has not been restored, so do not do so.
+ if (!pMethDesc->IsRestored())
+ return CORPROF_E_DATAINCOMPLETE;
+
+ //
+ // Fill in isDynamic, if desired.
+ //
+ if (isDynamic != NULL)
+ {
+ *isDynamic = pMethDesc->IsNoMetadata();
+ }
+
+ return S_OK;
+}
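+
+// Illustrative sketch (hypothetical profiler-side code, not part of the runtime),
+// assuming an ICorProfilerInfo8 pointer pInfo8 obtained via QueryInterface:
+//
+//     BOOL isDynamic = FALSE;
+//     if (SUCCEEDED(pInfo8->IsFunctionDynamic(functionId, &isDynamic)) && isDynamic)
+//     {
+//         // Metadata-less method (IL stub or LCG): query GetDynamicFunctionInfo
+//         // instead of GetFunctionInfo, which would fail for it.
+//     }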
+
+/*
+* GetFunctionFromIP3
+*
+* This function takes an IP and determines whether it lies within a managed function,
+* returning that function's FunctionID. This method is different from GetFunctionFromIP
+* in that it will return FunctionIDs even if they have no associated metadata.
+*
+* Parameters:
+* ip - The instruction pointer.
+* pFunctionId - An optional parameter for returning the FunctionID.
+* pReJitId - An optional parameter for returning the ReJIT id.
+*
+* Returns:
+* S_OK if successful.
+*/
+HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP3(LPCBYTE ip, FunctionID * pFunctionId, ReJITID * pReJitId)
+{
+ CONTRACTL
+ {
+ NOTHROW;
+
+ // Grabbing the rejitid requires entering the rejit manager's hash table & lock,
+ // which can switch us to preemptive mode and trigger GCs
+ GC_TRIGGERS;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+
+ // Grabbing the rejitid requires entering the rejit manager's hash table & lock,
+ CAN_TAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
+ kP2EEAllowableAfterAttach | kP2EETriggers,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetFunctionFromIP3 0x%p.\n",
+ ip));
+
+ HRESULT hr = S_OK;
+
+ EECodeInfo codeInfo;
+
+ hr = GetFunctionFromIPInternal(ip, &codeInfo, /* failOnNoMetadata */ FALSE);
+ if (FAILED(hr))
+ {
+ return hr;
+ }
+
+ if (pFunctionId)
+ {
+ *pFunctionId = MethodDescToFunctionID(codeInfo.GetMethodDesc());
+ }
+
+ if (pReJitId != NULL)
+ {
+ MethodDesc * pMD = codeInfo.GetMethodDesc();
+ *pReJitId = pMD->GetReJitManager()->GetReJitId(pMD, codeInfo.GetStartAddress());
+ }
+
+ return S_OK;
+}
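+
+// Illustrative sketch (hypothetical): unlike GetFunctionFromIP, the id returned here
+// may name an IL stub or LCG method, so callers should not assume metadata exists:
+//
+//     FunctionID funcId = NULL;
+//     ReJITID rejitId = 0;
+//     if (SUCCEEDED(pInfo8->GetFunctionFromIP3(ip, &funcId, &rejitId)))
+//     {
+//         // Pair with IsFunctionDynamic before doing metadata lookups on funcId.
+//     }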
+
+/*
+* GetDynamicFunctionInfo
+*
+* This function takes a functionId that may be of a metadata-less method like an IL Stub
+* or LCG method and gives information about it without failing the way GetFunctionInfo does.
+*
+* Parameters:
+* functionId - The function that is being requested.
+* pModuleId - An optional parameter for returning the module of the function.
+* ppvSig - An optional parameter for returning the signature of the function.
+* pbSig - An optional parameter for returning the size of the signature of the function.
+* cchName - A parameter for indicating the size of buffer for the wszName parameter.
+* pcchName - An optional parameter for returning the true size of the wszName parameter.
+* wszName - A caller-allocated buffer of size cchName for returning the function's name.
+*
+* Returns:
+* S_OK if successful.
+*/
+HRESULT ProfToEEInterfaceImpl::GetDynamicFunctionInfo(FunctionID functionId,
+ ModuleID *pModuleId,
+ PCCOR_SIGNATURE* ppvSig,
+ ULONG* pbSig,
+ ULONG cchName,
+ ULONG *pcchName,
+ __out_ecount_part_opt(cchName, *pcchName) WCHAR wszName[])
+{
+ CONTRACTL
+ {
+ NOTHROW;
+ GC_NOTRIGGER;
+ MODE_ANY;
+ EE_THREAD_NOT_REQUIRED;
+
+ // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation eventually
+ // reads metadata which causes us to take a reader lock. However, see
+ // code:#DisableLockOnAsyncCalls
+ DISABLED(CAN_TAKE_LOCK);
+
+ // Asynchronous functions can be called at arbitrary times when runtime
+ // is holding locks that cannot be reentered without causing deadlock.
+ // This contract detects any attempts to reenter locks held at the time
+ // this function was called.
+ CANNOT_RETAKE_LOCK;
+
+ SO_NOT_MAINLINE;
+
+ PRECONDITION(CheckPointer(pModuleId, NULL_OK));
+ PRECONDITION(CheckPointer(ppvSig, NULL_OK));
+ PRECONDITION(CheckPointer(pbSig, NULL_OK));
+ PRECONDITION(CheckPointer(pcchName, NULL_OK));
+ }
+ CONTRACTL_END;
+
+ // See code:#DisableLockOnAsyncCalls
+ PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
+
+ PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
+ (LF_CORPROF,
+ LL_INFO1000,
+ "**PROF: GetDynamicFunctionInfo 0x%p.\n",
+ functionId));
+
+ //
+ // Verify parameters.
+ //
+
+ if (functionId == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ MethodDesc *pMethDesc = FunctionIdToMethodDesc(functionId);
+
+ if (pMethDesc == NULL)
+ {
+ return E_INVALIDARG;
+ }
+
+ // It's not safe to examine a MethodDesc that has not been restored, so do not do so.
+ if (!pMethDesc->IsRestored())
+ return CORPROF_E_DATAINCOMPLETE;
+
+
+ if (!pMethDesc->IsNoMetadata())
+ return E_INVALIDARG;
+
+ //
+ // Fill in the ModuleId, if desired.
+ //
+ if (pModuleId != NULL)
+ {
+ *pModuleId = (ModuleID)pMethDesc->GetModule();
+ }
+
+ //
+ // Fill in the ppvSig and pbSig, if desired
+ //
+ if (ppvSig != NULL && pbSig != NULL)
+ {
+ pMethDesc->GetSig(ppvSig, pbSig);
+ }
+
+ HRESULT hr = S_OK;
+
+ EX_TRY
+ {
+ if (wszName != NULL)
+ *wszName = 0;
+ if (pcchName != NULL)
+ *pcchName = 0;
+
+ StackSString ss;
+ ss.SetUTF8(pMethDesc->GetName());
+ ss.Normalize();
+ LPCWSTR methodName = ss.GetUnicode();
+
+ ULONG trueLen = (ULONG)(wcslen(methodName) + 1);
+
+ // Return name of method as required.
+ if (wszName && cchName > 0)
+ {
+ if (cchName < trueLen)
+ {
+ hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
+ }
+ else
+ {
+ wcsncpy_s(wszName, cchName, methodName, trueLen);
+ }
+ }
+
+ // If they request the actual length of the name
+ if (pcchName)
+ *pcchName = trueLen;
+ }
+ EX_CATCH_HRESULT(hr);
+
+ return (hr);
+}
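+
+// Illustrative sketch (hypothetical) of the usual two-call buffer pattern: first ask
+// for the required length, then retrieve the name:
+//
+//     ULONG cchNeeded = 0;
+//     pInfo8->GetDynamicFunctionInfo(functionId, NULL, NULL, NULL, 0, &cchNeeded, NULL);
+//     WCHAR * wszName = new WCHAR[cchNeeded];
+//     pInfo8->GetDynamicFunctionInfo(functionId, NULL, NULL, NULL, cchNeeded, &cchNeeded, wszName);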
+
+/*
* GetStringLayout
*
* This function describes to a profiler the internal layout of a string.
@@ -7081,12 +7375,13 @@ Loop:
// GC info will assist us in determining whether this is a non-EBP frame and
// info about pushed arguments.
- PTR_VOID gcInfo = codeInfo.GetGCInfo();
+ GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
+ PTR_VOID gcInfo = gcInfoToken.Info;
InfoHdr header;
unsigned uiMethodSizeDummy;
PTR_CBYTE table = PTR_CBYTE(gcInfo);
table += decodeUnsigned(table, &uiMethodSizeDummy);
- table = decodeHeader(table, &header);
+ table = decodeHeader(table, gcInfoToken.Version, &header);
// Ok, GCInfo, can we do a simple EBP walk or what?
@@ -7235,18 +7530,13 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
ULONG32 contextSize)
{
-#ifdef _TARGET_ARM_
- // DoStackSnapshot is not supported on arm. Profilers can use OS apis to get the call stack.
- return E_NOTIMPL;
-#endif
-
-#if !defined(FEATURE_HIJACK) || !defined(PLATFORM_SUPPORTS_SAFE_THREADSUSPEND)
+#if !defined(FEATURE_HIJACK)
// DoStackSnapshot needs Thread::Suspend/ResumeThread functionality.
// On platforms w/o support for these APIs return E_NOTIMPL.
return E_NOTIMPL;
-#else // !defined(FEATURE_HIJACK) || !defined(PLATFORM_SUPPORTS_SAFE_THREADSUSPEND)
+#else // !defined(FEATURE_HIJACK)
CONTRACTL
{
@@ -7414,6 +7704,10 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
// First, check "1) Target thread to walk == current thread OR Target thread is suspended"
if (pThreadToSnapshot != pCurrentThread)
{
+#ifndef PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
+ hr = E_NOTIMPL;
+ goto Cleanup;
+#else
// Walking separate thread, so it must be suspended. First, ensure that
// target thread exists.
//
@@ -7449,6 +7743,7 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
goto Cleanup;
}
+#endif // !PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
}
hostCallPreference =
@@ -7481,7 +7776,10 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
// (Note that this whole block is skipped if pThreadToSnapshot is in preemptive mode (the IF
// above), as the context is unused in such a case--the EE Frame chain is used
// to seed the walk instead.)
-
+#ifndef PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
+ hr = E_NOTIMPL;
+ goto Cleanup;
+#else
if (!pThreadToSnapshot->GetSafelyRedirectableThreadContext(Thread::kDefaultChecks, &ctxCurrent, &rd))
{
LOG((LF_CORPROF, LL_INFO100, "**PROF: GetSafelyRedirectableThreadContext failure leads to CORPROF_E_STACKSNAPSHOT_UNSAFE.\n"));
@@ -7542,6 +7840,7 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
{
pctxSeed = &ctxCurrent;
}
+#endif // !PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
}
// Second, check "2) Target thread to walk is currently executing JITted / NGENd code"
@@ -7588,6 +7887,10 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
//
if (pThreadToSnapshot != pCurrentThread)
{
+#ifndef PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
+ hr = E_NOTIMPL;
+ goto Cleanup;
+#else
if (pctxSeed == NULL)
{
if (pThreadToSnapshot->GetSafelyRedirectableThreadContext(Thread::kDefaultChecks, &ctxCurrent, &rd))
@@ -7604,9 +7907,9 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
}
}
}
+#endif // !PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
}
-#endif
-
+#endif //_DEBUG
// Third, verify the target thread is seeded or not in the midst of an unwind.
if (pctxSeed == NULL)
{
@@ -7671,12 +7974,13 @@ HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
INDEBUG(if (pCurrentThread) pCurrentThread->m_ulForbidTypeLoad = ulForbidTypeLoad;)
-
Cleanup:
+#if defined(PLATFORM_SUPPORTS_SAFE_THREADSUSPEND)
if (fResumeThread)
{
pThreadToSnapshot->ResumeThread();
}
+#endif // PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
if (fResetSnapshotThreadExternalCount)
{
pThreadToSnapshot->DecExternalCountDANGEROUSProfilerOnly();
@@ -7684,7 +7988,7 @@ Cleanup:
return hr;
-#endif // !defined(FEATURE_HIJACK) || !defined(PLATFORM_SUPPORTS_SAFE_THREADSUSPEND)
+#endif // !defined(FEATURE_HIJACK)
}
@@ -8439,7 +8743,7 @@ HRESULT ProfToEEInterfaceImpl::RequestProfilerDetach(DWORD dwExpectedCompletionM
typedef struct _COR_PRF_ELT_INFO_INTERNAL
{
// Point to a platform dependent structure ASM helper push on the stack
- void * platformSpecificHandle;
+ void * platformSpecificHandle;
// startAddress of COR_PRF_FUNCTION_ARGUMENT_RANGE structure needs to point
// TO the argument value, not BE the argument value. So, when the argument
@@ -9461,7 +9765,7 @@ FCIMPL2(void, ProfilingFCallHelper::FC_RemotingClientSendingMessage, GUID *pId,
// it is a value class declared on the stack and so GC doesn't
// know about it.
- _ASSERTE (!GCHeap::GetGCHeap()->IsHeapPointer(pId)); // should be on the stack, not in the heap
+ _ASSERTE (!GCHeapUtilities::GetGCHeap()->IsHeapPointer(pId)); // should be on the stack, not in the heap
HELPER_METHOD_FRAME_BEGIN_NOPOLL();
{
diff --git a/src/vm/proftoeeinterfaceimpl.h b/src/vm/proftoeeinterfaceimpl.h
index 7bbd0bcf45..ed53ae2192 100644
--- a/src/vm/proftoeeinterfaceimpl.h
+++ b/src/vm/proftoeeinterfaceimpl.h
@@ -133,7 +133,7 @@ typedef struct _PROFILER_STACK_WALK_DATA PROFILER_STACK_WALK_DATA;
// from the profiler implementation. The profiler will call back on the v-table
// to get at EE internals as required.
-class ProfToEEInterfaceImpl : public ICorProfilerInfo7
+class ProfToEEInterfaceImpl : public ICorProfilerInfo8
{
public:
@@ -555,6 +555,28 @@ public:
// end ICorProfilerInfo7
+ // begin ICorProfilerInfo8
+
+ COM_METHOD IsFunctionDynamic(
+ FunctionID functionId,
+ BOOL *isDynamic);
+
+ COM_METHOD GetFunctionFromIP3(
+ LPCBYTE ip, // in
+ FunctionID * pFunctionId, // out
+ ReJITID * pReJitId); // out
+
+ COM_METHOD GetDynamicFunctionInfo(
+ FunctionID functionId,
+ ModuleID* moduleId,
+ PCCOR_SIGNATURE* ppvSig,
+ ULONG* pbSig,
+ ULONG cchName,
+ ULONG *pcchName,
+ WCHAR wszName[]);
+
+ // end ICorProfilerInfo8
+
protected:
// Internal Helper Functions
diff --git a/src/vm/rcwwalker.cpp b/src/vm/rcwwalker.cpp
index ad718126c1..0b875360fd 100644
--- a/src/vm/rcwwalker.cpp
+++ b/src/vm/rcwwalker.cpp
@@ -129,10 +129,10 @@ STDMETHODIMP CLRServicesImpl::GarbageCollect(DWORD dwFlags)
{
GCX_COOP_THREAD_EXISTS(GET_THREAD());
if (dwFlags & GC_FOR_APPX_SUSPEND) {
- GCHeap::GetGCHeap()->GarbageCollect(2, TRUE, collection_blocking | collection_optimized);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(2, TRUE, collection_blocking | collection_optimized);
}
else
- GCHeap::GetGCHeap()->GarbageCollect();
+ GCHeapUtilities::GetGCHeap()->GarbageCollect();
}
END_EXTERNAL_ENTRYPOINT;
return hr;
diff --git a/src/vm/readytoruninfo.cpp b/src/vm/readytoruninfo.cpp
index f867036823..a0e44ceaf3 100644
--- a/src/vm/readytoruninfo.cpp
+++ b/src/vm/readytoruninfo.cpp
@@ -401,6 +401,55 @@ static void LogR2r(const char *msg, PEFile *pFile)
#define DoLog(msg) if (s_r2rLogFile != NULL) LogR2r(msg, pFile)
+// Try to acquire an R2R image for exclusive use by a particular module.
+// Returns true if successful. Returns false if the image is already being used
+// by another module. Each R2R image has a space to store a pointer to the
+// module that owns it. We set this pointer unless it has already been
+// initialized to point to another Module.
+static bool AcquireImage(Module * pModule, PEImageLayout * pLayout, READYTORUN_HEADER * pHeader)
+{
+ STANDARD_VM_CONTRACT;
+
+ // First find the import sections of the image.
+ READYTORUN_IMPORT_SECTION * pImportSections = NULL;
+ READYTORUN_IMPORT_SECTION * pImportSectionsEnd = NULL;
+ READYTORUN_SECTION * pSections = (READYTORUN_SECTION*)(pHeader + 1);
+ for (DWORD i = 0; i < pHeader->NumberOfSections; i++)
+ {
+ if (pSections[i].Type == READYTORUN_SECTION_IMPORT_SECTIONS)
+ {
+ pImportSections = (READYTORUN_IMPORT_SECTION*)((PBYTE)pLayout->GetBase() + pSections[i].Section.VirtualAddress);
+ pImportSectionsEnd = (READYTORUN_IMPORT_SECTION*)((PBYTE)pImportSections + pSections[i].Section.Size);
+ break;
+ }
+ }
+
+ // Go through the import sections to find the import for the module pointer.
+ for (READYTORUN_IMPORT_SECTION * pCurSection = pImportSections; pCurSection < pImportSectionsEnd; pCurSection++)
+ {
+ // The import for the module pointer is always in an eager fixup section, so skip delayed fixup sections.
+ if ((pCurSection->Flags & READYTORUN_IMPORT_SECTION_FLAGS_EAGER) == 0)
+ continue;
+
+ // Found an eager fixup section. Check the signature of each fixup in this section.
+ PVOID *pFixups = (PVOID *)((PBYTE)pLayout->GetBase() + pCurSection->Section.VirtualAddress);
+ DWORD nFixups = pCurSection->Section.Size / sizeof(PVOID);
+ DWORD *pSignatures = (DWORD *)((PBYTE)pLayout->GetBase() + pCurSection->Signatures);
+ for (DWORD i = 0; i < nFixups; i++)
+ {
+ // See if we found the fixup for the Module pointer.
+ PBYTE pSig = (PBYTE)pLayout->GetBase() + pSignatures[i];
+ if (pSig[0] == READYTORUN_FIXUP_Helper && pSig[1] == READYTORUN_HELPER_Module)
+ {
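+ // Atomically claim the slot: publish pModule only if the slot is still NULL.
+ // The previous value tells us who won the race; NULL or pModule both mean
+ // this module now owns the image.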
+ Module * pPrevious = InterlockedCompareExchangeT(EnsureWritablePages((Module **)(pFixups + i)), pModule, NULL);
+ return pPrevious == NULL || pPrevious == pModule;
+ }
+ }
+ }
+
+ return false;
+}
+
PTR_ReadyToRunInfo ReadyToRunInfo::Initialize(Module * pModule, AllocMemTracker *pamTracker)
{
STANDARD_VM_CONTRACT;
@@ -478,6 +527,12 @@ PTR_ReadyToRunInfo ReadyToRunInfo::Initialize(Module * pModule, AllocMemTracker
return NULL;
}
+ if (!AcquireImage(pModule, pLayout, pHeader))
+ {
+ DoLog("Ready to Run disabled - module already loaded in another AppDomain");
+ return NULL;
+ }
+
LoaderHeap *pHeap = pModule->GetLoaderAllocator()->GetHighFrequencyHeap();
void * pMemory = pamTracker->Track(pHeap->AllocMem((S_SIZE_T)sizeof(ReadyToRunInfo)));
@@ -636,6 +691,22 @@ PCODE ReadyToRunInfo::GetEntryPoint(MethodDesc * pMD, BOOL fFixups /*=TRUE*/)
return NULL;
}
+#ifndef CROSSGEN_COMPILE
+#ifdef PROFILING_SUPPORTED
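+ // Give the profiler a chance to veto use of the precompiled code; if it
+ // declines the cache search, we return NULL below and the method gets JITted.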
+ BOOL fShouldSearchCache = TRUE;
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->
+ JITCachedFunctionSearchStarted((FunctionID)pMD, &fShouldSearchCache);
+ END_PIN_PROFILER();
+ }
+ if (!fShouldSearchCache)
+ {
+ return NULL;
+ }
+#endif // PROFILING_SUPPORTED
+#endif // CROSSGEN_COMPILE
+
uint id;
offset = m_nativeReader.DecodeUnsigned(offset, &id);
@@ -671,6 +742,17 @@ PCODE ReadyToRunInfo::GetEntryPoint(MethodDesc * pMD, BOOL fFixups /*=TRUE*/)
m_entryPointToMethodDescMap.InsertValue(PCODEToPINSTR(pEntryPoint), pMD);
}
+#ifndef CROSSGEN_COMPILE
+#ifdef PROFILING_SUPPORTED
+ {
+ BEGIN_PIN_PROFILER(CORProfilerTrackCacheSearches());
+ g_profControlBlock.pProfInterface->
+ JITCachedFunctionSearchFinished((FunctionID)pMD, COR_PRF_CACHED_FUNCTION_FOUND);
+ END_PIN_PROFILER();
+ }
+#endif // PROFILING_SUPPORTED
+#endif // CROSSGEN_COMPILE
+
if (g_pDebugInterface != NULL)
{
g_pDebugInterface->JITComplete(pMD, pEntryPoint);
diff --git a/src/vm/reflectioninvocation.cpp b/src/vm/reflectioninvocation.cpp
index de50514682..4edecdd2c6 100644
--- a/src/vm/reflectioninvocation.cpp
+++ b/src/vm/reflectioninvocation.cpp
@@ -2508,6 +2508,7 @@ FCIMPL1(void, ReflectionInvocation::PrepareContractedDelegate, Object * delegate
}
CONTRACTL_END;
+#ifdef FEATURE_CER
if (delegateUNSAFE == NULL)
return;
@@ -2517,9 +2518,11 @@ FCIMPL1(void, ReflectionInvocation::PrepareContractedDelegate, Object * delegate
PrepareDelegateHelper(&delegate, TRUE);
HELPER_METHOD_FRAME_END();
+#endif // FEATURE_CER
}
FCIMPLEND
+#ifdef FEATURE_CER
void ReflectionInvocation::PrepareDelegateHelper(OBJECTREF *pDelegate, BOOL onlyContractedMethod)
{
CONTRACTL {
@@ -2601,6 +2604,7 @@ void ReflectionInvocation::PrepareDelegateHelper(OBJECTREF *pDelegate, BOOL only
onlyContractedMethod);
}
}
+#endif // FEATURE_CER
FCIMPL0(void, ReflectionInvocation::ProbeForSufficientStack)
{
@@ -2637,7 +2641,6 @@ FCIMPL0(void, ReflectionInvocation::EnsureSufficientExecutionStack)
}
FCIMPLEND
-#ifdef FEATURE_CORECLR
// As with EnsureSufficientExecutionStack, this method checks and returns whether there is
// sufficient stack to execute the average Framework method, but rather than throwing,
// it simply returns a Boolean: true for sufficient stack space, otherwise false.
@@ -2654,7 +2657,6 @@ FCIMPL0(FC_BOOL_RET, ReflectionInvocation::TryEnsureSufficientExecutionStack)
FC_RETURN_BOOL(current >= limit);
}
FCIMPLEND
-#endif // FEATURE_CORECLR
struct ECWGCFContext
{
@@ -2849,6 +2851,7 @@ FCIMPL3(void, ReflectionInvocation::ExecuteCodeWithGuaranteedCleanup, Object* co
if (gc.backoutDelegate == NULL)
COMPlusThrowArgumentNull(W("backoutCode"));
+#ifdef FEATURE_CER
if (!IsCompilationProcess())
{
// Delegates are prepared as part of the ngen process, so only prepare the backout
@@ -2859,6 +2862,7 @@ FCIMPL3(void, ReflectionInvocation::ExecuteCodeWithGuaranteedCleanup, Object* co
// attempt to run the backout code.
PrepareMethodDesc(g_pExecuteBackoutCodeHelperMethod, Instantiation(), Instantiation(), FALSE, TRUE);
}
+#endif // FEATURE_CER
ExecuteCodeWithGuaranteedCleanupHelper(&gc);
diff --git a/src/vm/reflectioninvocation.h b/src/vm/reflectioninvocation.h
index fd14532091..7f72b61cd8 100644
--- a/src/vm/reflectioninvocation.h
+++ b/src/vm/reflectioninvocation.h
@@ -56,10 +56,8 @@ public:
#endif // !FEATURE_CORECLR
static FCDECL1(void, PrepareContractedDelegate, Object* delegateUNSAFE);
static FCDECL0(void, ProbeForSufficientStack);
- static FCDECL0(void, EnsureSufficientExecutionStack);
-#ifdef FEATURE_CORECLR // currently only used from mscorlib in FEATURE_CORECLR
- static FCDECL0(FC_BOOL_RET, TryEnsureSufficientExecutionStack);
-#endif // FEATURE_CORECLR
+ static FCDECL0(void, EnsureSufficientExecutionStack);
+ static FCDECL0(FC_BOOL_RET, TryEnsureSufficientExecutionStack);
static FCDECL3(void, ExecuteCodeWithGuaranteedCleanup, Object* pCodeDelegateUNSAFE, Object* pBackoutDelegateUNSAFE, Object* pUserDataUNSAFE);
// TypedReference functions, should go somewhere else
diff --git a/src/vm/rejit.cpp b/src/vm/rejit.cpp
index 6b3caa9091..0b6e922831 100644
--- a/src/vm/rejit.cpp
+++ b/src/vm/rejit.cpp
@@ -178,22 +178,22 @@ CrstStatic ReJitManager::s_csGlobalRequest;
//---------------------------------------------------------------------------------------
// Helpers
-inline DWORD JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
+inline CORJIT_FLAGS JitFlagsFromProfCodegenFlags(DWORD dwCodegenFlags)
{
LIMITED_METHOD_DAC_CONTRACT;
- DWORD jitFlags = 0;
+ CORJIT_FLAGS jitFlags;
// Note: COR_PRF_CODEGEN_DISABLE_INLINING is checked in
// code:CEEInfo::canInline#rejit (it has no equivalent CORJIT flag).
if ((dwCodegenFlags & COR_PRF_CODEGEN_DISABLE_ALL_OPTIMIZATIONS) != 0)
{
- jitFlags |= CORJIT_FLG_DEBUG_CODE;
+ jitFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE);
}
// In the future more flags may be added that need to be converted here (e.g.,
- // COR_PRF_CODEGEN_ENTERLEAVE / CORJIT_FLG_PROF_ENTERLEAVE)
+ // COR_PRF_CODEGEN_ENTERLEAVE / CORJIT_FLAG_PROF_ENTERLEAVE)
return jitFlags;
}
@@ -2170,8 +2170,7 @@ PCODE ReJitManager::DoReJit(ReJitInfo * pInfo)
pCodeOfRejittedCode = UnsafeJitFunction(
pInfo->GetMethodDesc(),
&ILHeader,
- JitFlagsFromProfCodegenFlags(pInfo->m_pShared->m_dwCodegenFlags),
- 0);
+ JitFlagsFromProfCodegenFlags(pInfo->m_pShared->m_dwCodegenFlags));
_ASSERTE(pCodeOfRejittedCode != NULL);
diff --git a/src/vm/rexcep.h b/src/vm/rexcep.h
index fe920bcd2d..1d1d9385f0 100644
--- a/src/vm/rexcep.h
+++ b/src/vm/rexcep.h
@@ -144,7 +144,11 @@ DEFINE_EXCEPTION(g_SystemNS, BadImageFormatException, true,
DEFINE_EXCEPTION(g_SystemNS, CannotUnloadAppDomainException, false, COR_E_CANNOTUNLOADAPPDOMAIN)
DEFINE_EXCEPTION(g_CodeContractsNS, ContractException, false, COR_E_CODECONTRACTFAILED)
+
+#ifdef FEATURE_REMOTING
DEFINE_EXCEPTION(g_SystemNS, ContextMarshalException, false, COR_E_CONTEXTMARSHAL)
+#endif
+
DEFINE_EXCEPTION(g_ReflectionNS, CustomAttributeFormatException, false, COR_E_CUSTOMATTRIBUTEFORMAT)
#if defined(FEATURE_X509) || defined(FEATURE_CRYPTO)
diff --git a/src/vm/runtimecallablewrapper.cpp b/src/vm/runtimecallablewrapper.cpp
index d12d5568f6..359b6896bc 100644
--- a/src/vm/runtimecallablewrapper.cpp
+++ b/src/vm/runtimecallablewrapper.cpp
@@ -42,6 +42,7 @@ class Object;
#include "olevariant.h"
#include "interopconverter.h"
#include "constrainedexecutionregion.h"
+#include "typestring.h"
#ifdef FEATURE_REMOTING
#include "crossdomaincalls.h"
#endif
@@ -1591,7 +1592,7 @@ public:
if (pRCW->IsValid())
{
- if (!GCHeap::GetGCHeap()->IsPromoted(OBJECTREFToObject(pRCW->GetExposedObject())) &&
+ if (!GCHeapUtilities::GetGCHeap()->IsPromoted(OBJECTREFToObject(pRCW->GetExposedObject())) &&
!pRCW->IsDetached())
{
// No need to use InterlockedOr here since every other place that modifies the flags
@@ -1612,7 +1613,7 @@ void RCWCache::DetachWrappersWorker()
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION(GCHeap::IsGCInProgress()); // GC is in progress and the runtime is suspended
+ PRECONDITION(GCHeapUtilities::IsGCInProgress()); // GC is in progress and the runtime is suspended
}
CONTRACTL_END;
@@ -2808,7 +2809,7 @@ void RCW::MinorCleanup()
NOTHROW;
GC_NOTRIGGER;
MODE_ANY;
- PRECONDITION(GCHeap::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ));
+ PRECONDITION(GCHeapUtilities::IsGCInProgress() || ( (g_fEEShutDown & ShutDown_SyncBlock) && g_fProcessDetach ));
}
CONTRACTL_END;
diff --git a/src/vm/safehandle.cpp b/src/vm/safehandle.cpp
index 3336e693b5..828b221025 100644
--- a/src/vm/safehandle.cpp
+++ b/src/vm/safehandle.cpp
@@ -246,7 +246,7 @@ void SafeHandle::Dispose()
// Suppress finalization on this object (we may be racing here but the
// operation below is idempotent and a dispose should never race a
// finalization).
- GCHeap::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
+ GCHeapUtilities::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
GCPROTECT_END();
}
@@ -394,7 +394,7 @@ FCIMPL1(void, SafeHandle::SetHandleAsInvalid, SafeHandle* refThisUNSAFE)
} while (InterlockedCompareExchange((LONG*)&sh->m_state, newState, oldState) != oldState);
- GCHeap::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
+ GCHeapUtilities::GetGCHeap()->SetFinalizationRun(OBJECTREFToObject(sh));
}
FCIMPLEND
diff --git a/src/vm/securityattributes.cpp b/src/vm/securityattributes.cpp
index 2c89540974..0facbbbfb3 100644
--- a/src/vm/securityattributes.cpp
+++ b/src/vm/securityattributes.cpp
@@ -304,9 +304,7 @@ void SecurityAttributes::EncodePermissionSet(IN OBJECTREF* pRef,
ppbData,
pcbData);
}
-#endif // FEATURE_CAS_POLICY
-#ifdef FEATURE_CAS_POLICY
static void SetupRestrictSecAttributes()
{
CONTRACTL {
@@ -334,7 +332,6 @@ static void SetupRestrictSecAttributes()
}
EX_END_CATCH(RethrowTerminalExceptions)
}
-#endif // FEATURE_CAS_POLICY
Assembly* SecurityAttributes::LoadAssemblyFromToken(IMetaDataAssemblyImport *pImport, mdAssemblyRef tkAssemblyRef)
{
@@ -1182,7 +1179,6 @@ void SecurityAttributes::AttrSetBlobToPermissionSets(
COMPlusThrowHR(hr);
}
-#ifdef FEATURE_CAS_POLICY
HRESULT SecurityAttributes::TranslateSecurityAttributesHelper(
CORSEC_ATTRSET *pAttrSet,
BYTE **ppbOutput,
@@ -1255,7 +1251,6 @@ HRESULT SecurityAttributes::TranslateSecurityAttributesHelper(
EX_CATCH_HRESULT(hr);
return hr;
}
-#endif // FEATURE_CAS_POLICY
// Call into managed code to group permissions into a PermissionSet and serialize it to XML
void SecurityAttributes::AttrArrayToPermissionSet(OBJECTREF* attrArray,
@@ -1354,7 +1349,7 @@ void SecurityAttributes::AttrArrayToPermissionSet(OBJECTREF* attrArray,
GCPROTECT_END();
}
-
+#endif // FEATURE_CAS_POLICY
//
// This is a public exported method
diff --git a/src/vm/securitydeclarativecache.cpp b/src/vm/securitydeclarativecache.cpp
index 29868f2b0c..dcfc1e0c4d 100644
--- a/src/vm/securitydeclarativecache.cpp
+++ b/src/vm/securitydeclarativecache.cpp
@@ -75,6 +75,7 @@ OBJECTREF PsetCacheEntry::CreateManagedPsetObject(DWORD dwAction, bool createEmp
MODE_COOPERATIVE;
} CONTRACTL_END;
+#ifdef FEATURE_CAS_POLICY
OBJECTREF orRet;
orRet = GetManagedPsetObject();
@@ -106,7 +107,6 @@ OBJECTREF PsetCacheEntry::CreateManagedPsetObject(DWORD dwAction, bool createEmp
} else {
-#ifdef FEATURE_CAS_POLICY
SecurityAttributes::XmlToPermissionSet(m_pKey->m_pbPset,
m_pKey->m_cbPset,
&gc.pset,
@@ -115,10 +115,6 @@ OBJECTREF PsetCacheEntry::CreateManagedPsetObject(DWORD dwAction, bool createEmp
0,
&gc.orNonCasPset,
&gc.orNonCasEncoding);
-#else
- // The v1.x serialized permission set format is not supported on CoreCLR
- COMPlusThrowHR(CORSECATTR_E_BAD_ATTRIBUTE);
-#endif //FEATURE_CAS_POLICY
}
StoreFirstObjectInHandle(m_handle, gc.pset);
@@ -135,6 +131,9 @@ OBJECTREF PsetCacheEntry::CreateManagedPsetObject(DWORD dwAction, bool createEmp
orRet = GetManagedPsetObject();
return orRet;
+#else
+ return NULL;
+#endif
}
#endif // CROSSGEN_COMPILE
diff --git a/src/vm/securityprincipal.h b/src/vm/securityprincipal.h
index 3bde5fd234..5d6b522a27 100644
--- a/src/vm/securityprincipal.h
+++ b/src/vm/securityprincipal.h
@@ -11,9 +11,11 @@
+#ifndef FEATURE_PAL
class COMPrincipal
{
public:
+#ifndef FEATURE_CORECLR
static
INT32 QCALLTYPE ImpersonateLoggedOnUser(HANDLE hToken);
@@ -24,6 +26,8 @@ public:
static
INT32 QCALLTYPE SetThreadToken(HANDLE hToken);
+#endif // !FEATURE_CORECLR
static void CLR_ImpersonateLoggedOnUser(HANDLE hToken);
};
+#endif // !FEATURE_PAL
diff --git a/src/vm/siginfo.cpp b/src/vm/siginfo.cpp
index decd3c0aab..9adfb4998c 100644
--- a/src/vm/siginfo.cpp
+++ b/src/vm/siginfo.cpp
@@ -14,7 +14,7 @@
#include "clsload.hpp"
#include "vars.hpp"
#include "excep.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "field.h"
#include "eeconfig.h"
#include "runtimehandles.h" // for SignatureNative
diff --git a/src/vm/stacksampler.cpp b/src/vm/stacksampler.cpp
index 270d278b66..d95adb1f63 100644
--- a/src/vm/stacksampler.cpp
+++ b/src/vm/stacksampler.cpp
@@ -154,7 +154,7 @@ bool IsGoodMethodDesc(MethodDesc* pMD)
//
// An opportunity to record the parameters passed to the JIT at the time of JITting this method.
/* static */
-void StackSampler::RecordJittingInfo(MethodDesc* pMD, DWORD dwFlags, DWORD dwFlags2)
+void StackSampler::RecordJittingInfo(MethodDesc* pMD, CORJIT_FLAGS flags)
{
WRAPPER_NO_CONTRACT;
if (g_pStackSampler == nullptr)
@@ -167,10 +167,10 @@ void StackSampler::RecordJittingInfo(MethodDesc* pMD, DWORD dwFlags, DWORD dwFla
return;
}
// Record in the hash map.
- g_pStackSampler->RecordJittingInfoInternal(pMD, dwFlags);
+ g_pStackSampler->RecordJittingInfoInternal(pMD, flags);
}
-void StackSampler::RecordJittingInfoInternal(MethodDesc* pMD, DWORD dwFlags)
+void StackSampler::RecordJittingInfoInternal(MethodDesc* pMD, CORJIT_FLAGS flags)
{
ADID dwDomainId = GetThread()->GetDomain()->GetId();
JitInfoHashEntry entry(pMD, dwDomainId);
@@ -426,7 +426,7 @@ void StackSampler::JitAndCollectTrace(MethodDesc* pMD, const ADID& adId)
// Indicate to the JIT or the JIT interface that we are JITting
// in the background for stack sampling.
- DWORD dwFlags2 = CORJIT_FLG2_SAMPLING_JIT_BACKGROUND;
+ CORJIT_FLAGS flags(CORJIT_FLAGS::CORJIT_FLAG_SAMPLING_JIT_BACKGROUND);
_ASSERTE(pMD->IsIL());
@@ -447,7 +447,7 @@ void StackSampler::JitAndCollectTrace(MethodDesc* pMD, const ADID& adId)
LOG((LF_JIT, LL_INFO100000, "%s:%s\n", pMD->GetMethodTable()->GetClass()->GetDebugClassName(), pMD->GetName()));
#endif
- PCODE pCode = UnsafeJitFunction(pMD, pDecoder, 0, dwFlags2);
+ PCODE pCode = UnsafeJitFunction(pMD, pDecoder, flags);
}
END_DOMAIN_TRANSITION;
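The stacksampler hunks above are one instance of a pattern that runs through this change: pairs of loosely related DWORD bitmasks (dwFlags/dwFlags2, carrying CORJIT_FLG_* and CORJIT_FLG2_* values) are replaced by a single strongly typed CORJIT_FLAGS value. The real type lives in corjit.h and is not shown here; a minimal self-contained model of such a wrapper, matching the IsSet/Clear/IsEmpty operations used in the stubgen.cpp hunk further down, might look like:

    #include <cstdint>

    // Minimal model of a strongly typed JIT-flags wrapper (the real
    // CORJIT_FLAGS in corjit.h is the authoritative definition).
    class JitFlags
    {
    public:
        enum Flag
        {
            FLAG_IL_STUB                 = 0,
            FLAG_PUBLISH_SECRET_PARAM    = 1,
            FLAG_SAMPLING_JIT_BACKGROUND = 2,
        };

        JitFlags() : m_bits(0) {}
        explicit JitFlags(Flag f) : m_bits(0) { Set(f); }

        void Set(Flag f)         { m_bits |= 1ull << f; }
        void Clear(Flag f)       { m_bits &= ~(1ull << f); }
        bool IsSet(Flag f) const { return (m_bits & (1ull << f)) != 0; }
        bool IsEmpty() const     { return m_bits == 0; }

    private:
        uint64_t m_bits; // one bit per flag; cannot be mixed with a raw DWORD
    };

A call such as UnsafeJitFunction(pMD, pDecoder, flags) then takes one value covering both former flag words, so a second-word constant can no longer be passed where a first-word mask was expected.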
diff --git a/src/vm/stacksampler.h b/src/vm/stacksampler.h
index 33fc6b93ce..0b9add1713 100644
--- a/src/vm/stacksampler.h
+++ b/src/vm/stacksampler.h
@@ -21,7 +21,7 @@ class StackSampler
public:
// Interface
static void Init();
- static void RecordJittingInfo(MethodDesc* pMD, DWORD dwFlags, DWORD dwFlags2);
+ static void RecordJittingInfo(MethodDesc* pMD, CORJIT_FLAGS flags);
private:
@@ -41,7 +41,7 @@ private:
void JitAndCollectTrace(MethodDesc* pMD, const ADID& adId);
- void RecordJittingInfoInternal(MethodDesc* pMD, DWORD flags);
+ void RecordJittingInfoInternal(MethodDesc* pMD, CORJIT_FLAGS flags);
ADID GetDomainId(MethodDesc* pMD, const ADID& defaultId);
diff --git a/src/vm/stackwalk.cpp b/src/vm/stackwalk.cpp
index 3b0b4720f7..18a8900039 100644
--- a/src/vm/stackwalk.cpp
+++ b/src/vm/stackwalk.cpp
@@ -2416,7 +2416,7 @@ StackWalkAction StackFrameIterator::NextRaw(void)
OBJECTREF orUnwind = NULL;
if (m_crawl.GetCodeManager()->IsInSynchronizedRegion(m_crawl.GetRelOffset(),
- m_crawl.GetGCInfo(),
+ m_crawl.GetGCInfoToken(),
m_crawl.GetCodeManagerFlags()))
{
if (pMD->IsStatic())
@@ -2560,7 +2560,9 @@ StackWalkAction StackFrameIterator::NextRaw(void)
// to recover from AVs during profiler stackwalk.)
PTR_VOID newSP = PTR_VOID((TADDR)GetRegdisplaySP(m_crawl.pRD));
+#ifndef NO_FIXED_STACK_LIMIT
FAIL_IF_SPECULATIVE_WALK(newSP >= m_crawl.pThread->GetCachedStackLimit());
+#endif // !NO_FIXED_STACK_LIMIT
FAIL_IF_SPECULATIVE_WALK(newSP < m_crawl.pThread->GetCachedStackBase());
#undef FAIL_IF_SPECULATIVE_WALK
@@ -2675,7 +2677,7 @@ StackWalkAction StackFrameIterator::NextRaw(void)
// We are transitioning from unmanaged code to managed code... let's do some validation of our
// EH mechanism on platforms that we can.
-#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && defined(_TARGET_X86_)
+#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && (defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL))
VerifyValidTransitionFromManagedCode(m_crawl.pThread, &m_crawl);
#endif // _DEBUG && !DACCESS_COMPILE && _TARGET_X86_ && !FEATURE_STUBS_AS_IL
}
@@ -3158,7 +3160,7 @@ void StackFrameIterator::PreProcessingForManagedFrames(void)
m_crawl.pFunc->IsSynchronized() &&
!m_crawl.pFunc->IsStatic() &&
m_crawl.GetCodeManager()->IsInSynchronizedRegion(m_crawl.GetRelOffset(),
- m_crawl.GetGCInfo(),
+ m_crawl.GetGCInfoToken(),
m_crawl.GetCodeManagerFlags()))
{
BEGIN_GCX_ASSERT_COOP;
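Both stackwalk.cpp hunks swap GetGCInfo() for GetGCInfoToken(), part of this change's broader shift from handing around a raw GC-info pointer to handing around a token that also records the encoding version. A sketch of the idea; the field names are assumptions based on the call sites, not a quote of the real header:

    #include <cstdint>

    // Sketch: pairing the GC-info blob with its format version lets one
    // decoder handle old and new encodings. Field names are assumed.
    struct GCInfoToken
    {
        void*    Info;    // the method's GC info blob
        uint32_t Version; // encoding version the blob was written with
    };

Entry points such as IsInSynchronizedRegion() can then branch on the token's version rather than assuming the current encoding.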
diff --git a/src/vm/stackwalk.h b/src/vm/stackwalk.h
index 004d673a2a..f8bd519106 100644
--- a/src/vm/stackwalk.h
+++ b/src/vm/stackwalk.h
@@ -36,14 +36,14 @@ class AppDomain;
// on the stack. The FEF is used for unwinding. If not defined, the unwinding
// uses the exception context.
#define USE_FEF // to mark where code needs to be changed to eliminate the FEF
-#if defined(_TARGET_X86_)
+#if defined(_TARGET_X86_) && !defined(FEATURE_PAL)
#undef USE_FEF // Turn off the FEF use on x86.
#define ELIMINATE_FEF
#else
#if defined(ELIMINATE_FEF)
#undef ELIMINATE_FEF
#endif
-#endif // _86_
+#endif // _TARGET_X86_ && !FEATURE_PAL
//************************************************************************
// Enumerate all functions.
diff --git a/src/vm/stubcache.h b/src/vm/stubcache.h
index 781d46c8f5..b42ed16c25 100644
--- a/src/vm/stubcache.h
+++ b/src/vm/stubcache.h
@@ -50,7 +50,7 @@ public:
//---------------------------------------------------------
// Destructor
//---------------------------------------------------------
- ~StubCacheBase();
+ virtual ~StubCacheBase();
//---------------------------------------------------------
// Returns the equivalent hashed Stub, creating a new hash
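The one-line stubcache.h change is easy to miss but significant: StubCacheBase is subclassed, and deleting a derived cache through a base pointer is undefined behavior unless the base destructor is virtual. A standalone illustration with hypothetical names:

    #include <cstdio>

    struct CacheBase
    {
        virtual ~CacheBase() { puts("~CacheBase"); } // virtual: deletable via base*
    };

    struct DerivedCache : CacheBase
    {
        ~DerivedCache() override { puts("~DerivedCache"); } // now guaranteed to run
    };

    int main()
    {
        CacheBase* p = new DerivedCache;
        delete p; // prints ~DerivedCache, then ~CacheBase; UB if the base dtor were non-virtual
        return 0;
    }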
diff --git a/src/vm/stubgen.cpp b/src/vm/stubgen.cpp
index fffa52a366..18a6c19480 100644
--- a/src/vm/stubgen.cpp
+++ b/src/vm/stubgen.cpp
@@ -631,15 +631,7 @@ ILStubLinker::LogILStubWorker(
}
}
-static inline void LogOneFlag(DWORD flags, DWORD flag, LPCSTR str, DWORD facility, DWORD level)
-{
- if (flags & flag)
- {
- LOG((facility, level, str));
- }
-}
-
-static void LogJitFlags(DWORD facility, DWORD level, DWORD dwJitFlags)
+static void LogJitFlags(DWORD facility, DWORD level, CORJIT_FLAGS jitFlags)
{
CONTRACTL
{
@@ -647,29 +639,28 @@ static void LogJitFlags(DWORD facility, DWORD level, DWORD dwJitFlags)
}
CONTRACTL_END;
- LOG((facility, level, "dwJitFlags: 0x%08x\n", dwJitFlags));
+ LOG((facility, level, "jitFlags:\n"));
-#define LOG_FLAG(name) LogOneFlag(dwJitFlags, name, " " #name "\n", facility, level);
+#define LOG_FLAG(name) \
+ if (jitFlags.IsSet(name)) \
+ { \
+ LOG((facility, level, " " #name "\n")); \
+ jitFlags.Clear(name); \
+ }
// these are all we care about at the moment
- LOG_FLAG(CORJIT_FLG_IL_STUB);
- LOG_FLAG(CORJIT_FLG_PUBLISH_SECRET_PARAM);
+ LOG_FLAG(CORJIT_FLAGS::CORJIT_FLAG_IL_STUB);
+ LOG_FLAG(CORJIT_FLAGS::CORJIT_FLAG_PUBLISH_SECRET_PARAM);
#undef LOG_FLAG
- DWORD dwKnownMask =
- CORJIT_FLG_IL_STUB |
- CORJIT_FLG_PUBLISH_SECRET_PARAM |
- NULL;
-
- DWORD dwUnknownFlags = dwJitFlags & ~dwKnownMask;
- if (0 != dwUnknownFlags)
+ if (!jitFlags.IsEmpty())
{
- LOG((facility, level, "UNKNOWN FLAGS: 0x%08x\n", dwUnknownFlags));
+ LOG((facility, level, "UNKNOWN FLAGS also set\n"));
}
}
-void ILStubLinker::LogILStub(DWORD dwJitFlags, SString *pDumpILStubCode)
+void ILStubLinker::LogILStub(CORJIT_FLAGS jitFlags, SString *pDumpILStubCode)
{
CONTRACTL
{
@@ -683,7 +674,7 @@ void ILStubLinker::LogILStub(DWORD dwJitFlags, SString *pDumpILStubCode)
INT iCurStack = 0;
if (pDumpILStubCode == NULL)
- LogJitFlags(LF_STUBS, LL_INFO1000, dwJitFlags);
+ LogJitFlags(LF_STUBS, LL_INFO1000, jitFlags);
while (pCurrentStream)
{
@@ -841,7 +832,7 @@ size_t ILStubLinker::Link(UINT* puMaxStack)
#ifdef _DEBUG
if (fStackUnderflow)
{
- LogILStub(NULL);
+ LogILStub(CORJIT_FLAGS());
CONSISTENCY_CHECK_MSG(false, "IL stack underflow! -- see logging output");
}
#endif // _DEBUG
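The rewritten LOG_FLAG macro above combines two small techniques: the preprocessor's # operator stringizes the flag name for the log line, and clearing each recognized flag as it is logged means whatever remains at the end must be unknown. The same pattern on a plain bitmask, as a self-contained sketch:

    #include <cstdio>

    enum : unsigned { FLAG_A = 0x1, FLAG_B = 0x2 }; // stand-in flag values

    static void LogFlags(unsigned flags)
    {
        // #name stringizes the macro argument for the log line; clearing
        // each matched bit leaves only unrecognized flags at the end.
    #define LOG_FLAG(name)             \
        if (flags & name)              \
        {                              \
            printf("  " #name "\n");   \
            flags &= ~unsigned(name);  \
        }

        LOG_FLAG(FLAG_A);
        LOG_FLAG(FLAG_B);
    #undef LOG_FLAG

        if (flags != 0)
            printf("UNKNOWN FLAGS also set: 0x%08x\n", flags);
    }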
diff --git a/src/vm/stubgen.h b/src/vm/stubgen.h
index e6d3f9ec4d..7bebfa7610 100644
--- a/src/vm/stubgen.h
+++ b/src/vm/stubgen.h
@@ -431,7 +431,7 @@ public:
void ClearCodeStreams();
- void LogILStub(DWORD dwJitFlags, SString *pDumpILStubCode = NULL);
+ void LogILStub(CORJIT_FLAGS jitFlags, SString *pDumpILStubCode = NULL);
protected:
void LogILStubWorker(ILInstruction* pInstrBuffer, UINT numInstr, size_t* pcbCode, INT* piCurStack, SString *pDumpILStubCode = NULL);
void LogILInstruction(size_t curOffset, bool isLabeled, INT iCurStack, ILInstruction* pInstruction, SString *pDumpILStubCode = NULL);
diff --git a/src/vm/stubhelpers.cpp b/src/vm/stubhelpers.cpp
index 6e7fb49b96..cbe1d37c94 100644
--- a/src/vm/stubhelpers.cpp
+++ b/src/vm/stubhelpers.cpp
@@ -19,7 +19,7 @@
#include "security.h"
#include "eventtrace.h"
#include "comdatetime.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "interoputil.h"
#include "gcscan.h"
#ifdef FEATURE_REMOTING
@@ -70,7 +70,7 @@ void StubHelpers::ValidateObjectInternal(Object *pObjUNSAFE, BOOL fValidateNextO
// and the next object as required
if (fValidateNextObj)
{
- Object *nextObj = GCHeap::GetGCHeap()->NextObj(pObjUNSAFE);
+ Object *nextObj = GCHeapUtilities::GetGCHeap()->NextObj(pObjUNSAFE);
if (nextObj != NULL)
{
// Note that the MethodTable of the object (i.e. the pointer at offset 0) can change from
@@ -162,7 +162,7 @@ void StubHelpers::ProcessByrefValidationList()
{
entry = s_ByrefValidationEntries[i];
- Object *pObjUNSAFE = GCHeap::GetGCHeap()->GetGCHeap()->GetContainingObject(entry.pByref);
+ Object *pObjUNSAFE = GCHeapUtilities::GetGCHeap()->GetContainingObject(entry.pByref);
ValidateObjectInternal(pObjUNSAFE, TRUE);
}
}
@@ -2004,7 +2004,7 @@ FCIMPL3(void, StubHelpers::ValidateObject, Object *pObjUNSAFE, MethodDesc *pMD,
AVInRuntimeImplOkayHolder AVOkay;
// don't validate the next object if a BGC is in progress. we can race with background
// sweep which could make the next object a Free object underneath us if it's dead.
- ValidateObjectInternal(pObjUNSAFE, !(GCHeap::GetGCHeap()->IsConcurrentGCInProgress()));
+ ValidateObjectInternal(pObjUNSAFE, !(GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress()));
}
EX_CATCH
{
@@ -2031,7 +2031,7 @@ FCIMPL3(void, StubHelpers::ValidateByref, void *pByref, MethodDesc *pMD, Object
// perform the validation on next GC (see code:StubHelpers.ProcessByrefValidationList).
// Skip byref if is not pointing inside managed heap
- if (!GCHeap::GetGCHeap()->IsHeapPointer(pByref))
+ if (!GCHeapUtilities::GetGCHeap()->IsHeapPointer(pByref))
{
return;
}
@@ -2066,7 +2066,7 @@ FCIMPL3(void, StubHelpers::ValidateByref, void *pByref, MethodDesc *pMD, Object
if (NumOfEntries > BYREF_VALIDATION_LIST_MAX_SIZE)
{
// if the list is too big, trigger GC now
- GCHeap::GetGCHeap()->GarbageCollect(0);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(0);
}
HELPER_METHOD_FRAME_END();
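The GCHeap:: to GCHeapUtilities:: renames in stubhelpers.cpp, and in the sync and threading files that follow, funnel every EE-side heap access through a static facade over the IGCHeap interface (note the class GCHeap → class IGCHeap forward-declaration change in the vars.hpp hunk near the end). A sketch of the facade's likely shape; the member details are assumptions, not the contents of gcheaputilities.h:

    #include <cassert>

    // Sketch of the facade's shape; details are assumptions, not the
    // actual gcheaputilities.h.
    class IGCHeap; // the GC's public interface -- the EE no longer names GCHeap

    class GCHeapUtilities
    {
    public:
        static IGCHeap* GetGCHeap()
        {
            assert(g_pGCHeap != nullptr); // callers must not race initialization
            return g_pGCHeap;
        }

        static bool IsGCHeapInitialized() { return g_pGCHeap != nullptr; }

    private:
        GCHeapUtilities() = delete; // static-only facade
        static IGCHeap* g_pGCHeap;  // set once during EE startup
    };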
diff --git a/src/vm/syncblk.cpp b/src/vm/syncblk.cpp
index 3975542d98..171a8d3bb7 100644
--- a/src/vm/syncblk.cpp
+++ b/src/vm/syncblk.cpp
@@ -1372,7 +1372,7 @@ void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintp
STRESS_LOG0 (LF_GC | LF_SYNC, LL_INFO100, "GCWeakPtrScan starting\n");
#endif
- if (GCHeap::GetGCHeap()->GetCondemnedGeneration() < GCHeap::GetGCHeap()->GetMaxGeneration())
+ if (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() < GCHeapUtilities::GetGCHeap()->GetMaxGeneration())
{
#ifdef VERIFY_HEAP
//for VSW 294550: we saw stale object reference in SyncBlkCache, so we want to make sure the card
@@ -1416,7 +1416,7 @@ void SyncBlockCache::GCWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintp
Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
if (o && !((size_t)o & 1))
{
- if (GCHeap::GetGCHeap()->IsEphemeral (o))
+ if (GCHeapUtilities::GetGCHeap()->IsEphemeral (o))
{
clear_card = FALSE;
@@ -1615,8 +1615,8 @@ void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
CONTRACTL_END;
if (demoting &&
- (GCHeap::GetGCHeap()->GetCondemnedGeneration() ==
- GCHeap::GetGCHeap()->GetMaxGeneration()))
+ (GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration() ==
+ GCHeapUtilities::GetGCHeap()->GetMaxGeneration()))
{
//scan the bitmap
size_t dw = 0;
@@ -1643,7 +1643,7 @@ void SyncBlockCache::GCDone(BOOL demoting, int max_gen)
Object* o = SyncTableEntry::GetSyncTableEntry()[nb].m_Object;
if (o && !((size_t)o & 1))
{
- if (GCHeap::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
+ if (GCHeapUtilities::GetGCHeap()->WhichGeneration (o) < (unsigned int)max_gen)
{
SetCard (card);
break;
@@ -1713,7 +1713,7 @@ void SyncBlockCache::VerifySyncTableEntry()
DWORD idx = o->GetHeader()->GetHeaderSyncBlockIndex();
_ASSERTE(idx == nb || ((0 == idx) && (loop == max_iterations)));
- _ASSERTE(!GCHeap::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
+ _ASSERTE(!GCHeapUtilities::GetGCHeap()->IsEphemeral(o) || CardSetP(CardOf(nb)));
}
}
}
@@ -2498,10 +2498,10 @@ BOOL ObjHeader::Validate (BOOL bVerifySyncBlkIndex)
//BIT_SBLK_GC_RESERVE (0x20000000) is only set during GC. But for frozen objects, we don't clear the bit
if (bits & BIT_SBLK_GC_RESERVE)
{
- if (!GCHeap::GetGCHeap()->IsGCInProgress () && !GCHeap::GetGCHeap()->IsConcurrentGCInProgress ())
+ if (!GCHeapUtilities::IsGCInProgress () && !GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress ())
{
#ifdef FEATURE_BASICFREEZE
- ASSERT_AND_CHECK (GCHeap::GetGCHeap()->IsInFrozenSegment(obj));
+ ASSERT_AND_CHECK (GCHeapUtilities::GetGCHeap()->IsInFrozenSegment(obj));
#else //FEATURE_BASICFREEZE
_ASSERTE(!"Reserve bit not cleared");
return FALSE;
diff --git a/src/vm/syncclean.cpp b/src/vm/syncclean.cpp
index e02c2f90c2..133f448e16 100644
--- a/src/vm/syncclean.cpp
+++ b/src/vm/syncclean.cpp
@@ -73,7 +73,7 @@ void SyncClean::CleanUp ()
// Only GC thread can call this.
_ASSERTE (g_fProcessDetach ||
IsGCSpecialThread() ||
- (GCHeap::IsGCInProgress() && GetThread() == ThreadSuspend::GetSuspensionThread()));
+ (GCHeapUtilities::IsGCInProgress() && GetThread() == ThreadSuspend::GetSuspensionThread()));
if (m_HashMap)
{
Bucket * pTempBucket = FastInterlockExchangePointer(m_HashMap.GetPointer(), NULL);
diff --git a/src/vm/testhookmgr.cpp b/src/vm/testhookmgr.cpp
index 9ec53f8e45..48370134d2 100644
--- a/src/vm/testhookmgr.cpp
+++ b/src/vm/testhookmgr.cpp
@@ -655,7 +655,7 @@ HRESULT CLRTestHookManager::GC(int generation)
CONTRACTL_END;
_ASSERTE(GetThread()==NULL || !GetThread()->PreemptiveGCDisabled());
- GCHeap::GetGCHeap()->GarbageCollect(generation);
+ GCHeapUtilities::GetGCHeap()->GarbageCollect(generation);
FinalizerThread::FinalizerThreadWait();
return S_OK;
}
diff --git a/src/vm/threadpoolrequest.cpp b/src/vm/threadpoolrequest.cpp
index 8d47e6b810..a5c1c4263d 100644
--- a/src/vm/threadpoolrequest.cpp
+++ b/src/vm/threadpoolrequest.cpp
@@ -517,11 +517,11 @@ void UnManagedPerAppDomainTPCount::DispatchWorkItem(bool* foundWork, bool* wasNo
firstIteration = false;
*foundWork = true;
- if (GCHeap::IsGCInProgress(TRUE))
+ if (GCHeapUtilities::IsGCInProgress(TRUE))
{
// GC is imminent, so wait until GC is complete before executing next request.
// this reduces in-flight objects allocated right before GC, easing the GC's work
- GCHeap::WaitForGCCompletion(TRUE);
+ GCHeapUtilities::WaitForGCCompletion(TRUE);
}
PREFIX_ASSUME(pWorkRequest != NULL);
diff --git a/src/vm/threads.cpp b/src/vm/threads.cpp
index cc2e4eb5e4..38094ca8b5 100644
--- a/src/vm/threads.cpp
+++ b/src/vm/threads.cpp
@@ -18,7 +18,7 @@
#include "excep.h"
#include "comsynchronizable.h"
#include "log.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "mscoree.h"
#include "dbginterface.h"
#include "corprof.h" // profiling
@@ -2022,8 +2022,10 @@ Thread::Thread()
m_ppvHJRetAddrPtr = (VOID**) 0xCCCCCCCCCCCCCCCC;
m_pvHJRetAddr = (VOID*) 0xCCCCCCCCCCCCCCCC;
+#ifndef PLATFORM_UNIX
X86_ONLY(m_LastRedirectIP = 0);
X86_ONLY(m_SpinCount = 0);
+#endif // !PLATFORM_UNIX
#endif // FEATURE_HIJACK
#if defined(_DEBUG) && defined(TRACK_SYNC)
@@ -2232,7 +2234,7 @@ Thread::Thread()
m_fGCSpecial = FALSE;
-#if !defined(FEATURE_CORECLR)
+#if !defined(FEATURE_PAL)
m_wCPUGroup = 0;
m_pAffinityMask = 0;
#endif
@@ -3889,14 +3891,14 @@ void Thread::OnThreadTerminate(BOOL holdingLock)
#endif
}
- if (GCHeap::IsGCHeapInitialized())
+ if (GCHeapUtilities::IsGCHeapInitialized())
{
// Guaranteed to NOT be a shutdown case, because we tear down the heap before
// we tear down any threads during shutdown.
if (ThisThreadID == CurrentThreadID)
{
GCX_COOP();
- GCHeap::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
+ GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
m_alloc_context.init();
}
}
@@ -3957,11 +3959,11 @@ void Thread::OnThreadTerminate(BOOL holdingLock)
#endif
}
- if (GCHeap::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
+ if (GCHeapUtilities::IsGCHeapInitialized() && ThisThreadID != CurrentThreadID)
{
// We must be holding the ThreadStore lock in order to clean up alloc context.
// We should never call FixAllocContext during GC.
- GCHeap::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
+ GCHeapUtilities::GetGCHeap()->FixAllocContext(&m_alloc_context, FALSE, NULL, NULL);
m_alloc_context.init();
}
@@ -8245,7 +8247,9 @@ void CheckRegDisplaySP (REGDISPLAY *pRD)
{
if (pRD->SP && pRD->_pThread)
{
+#ifndef NO_FIXED_STACK_LIMIT
_ASSERTE(PTR_VOID(pRD->SP) >= pRD->_pThread->GetCachedStackLimit());
+#endif // !NO_FIXED_STACK_LIMIT
_ASSERTE(PTR_VOID(pRD->SP) < pRD->_pThread->GetCachedStackBase());
}
}
@@ -9159,89 +9163,7 @@ void Thread::ReturnToContextAndOOM(ContextTransitionFrame* pFrame)
COMPlusThrowOM();
}
-
-#ifdef FEATURE_CORECLR
-
-//---------------------------------------------------------------------------------------
-// Allocates an agile CrossAppDomainMarshaledException whose ToString() and ErrorCode
-// matches the original exception.
-//
-// This is our "remoting" story for exceptions that leak across appdomains in Telesto.
-//---------------------------------------------------------------------------------------
-static OBJECTREF WrapThrowableInCrossAppDomainMarshaledException(OBJECTREF pOriginalThrowable)
-{
- CONTRACTL
- {
- GC_TRIGGERS;
- THROWS;
- MODE_COOPERATIVE;
- }
- CONTRACTL_END;
-
- _ASSERTE(GetThread() != NULL);
-
-
- struct _gc
- {
- OBJECTREF pOriginalThrowable;
- OBJECTREF pThrowable;
- STRINGREF pOriginalMessage;
- }
- prot;
-
-
- memset(&prot, 0, sizeof(prot));
-
- GCPROTECT_BEGIN(prot);
- prot.pOriginalThrowable = pOriginalThrowable;
- prot.pOriginalMessage = GetExceptionMessage(prot.pOriginalThrowable);
- HRESULT originalHResult = GetExceptionHResult(prot.pOriginalThrowable);
-
- MethodTable *pMT = MscorlibBinder::GetClass(CLASS__CROSSAPPDOMAINMARSHALEDEXCEPTION);
- prot.pThrowable = AllocateObject(pMT);
-
- MethodDescCallSite exceptionCtor(METHOD__CROSSAPPDOMAINMARSHALEDEXCEPTION__STR_INT_CTOR);
-
- ARG_SLOT args1[] = {
- ObjToArgSlot(prot.pThrowable),
- ObjToArgSlot(prot.pOriginalMessage),
- (ARG_SLOT)originalHResult,
- };
- exceptionCtor.Call(args1);
-
-#ifndef FEATURE_PAL
- // Since, on CoreCLR, we dont have serialization of exceptions going across
- // AD transition boundaries, we will copy over the bucket details to the
- // CrossAppDomainMarshalledException object from the original exception object
- // if it isnt a preallocated exception.
- if (IsWatsonEnabled() && (!CLRException::IsPreallocatedExceptionObject(prot.pOriginalThrowable)))
- {
- // If the watson buckets are present, then copy them over.
- // They maybe missing if the original throwable couldnt get them from Watson helper functions
- // during SetupInitialThrowBucketDetails due to OOM.
- if (((EXCEPTIONREF)prot.pOriginalThrowable)->AreWatsonBucketsPresent())
- {
- _ASSERTE(prot.pThrowable != NULL);
- // Copy them to CrossADMarshalledException object
- CopyWatsonBucketsBetweenThrowables(prot.pOriginalThrowable, prot.pThrowable);
-
- // The exception object should now have the buckets inside it
- _ASSERTE(((EXCEPTIONREF)prot.pThrowable)->AreWatsonBucketsPresent());
- }
- }
-#endif // !FEATURE_PAL
-
- GCPROTECT_END(); //Prot
-
-
- return prot.pThrowable;
-}
-
-
-
-#endif
-
-
+#ifdef FEATURE_REMOTING
// for cases when marshaling is not needed
// throws it is able to take a shortcut, otherwise just returns
void Thread::RaiseCrossContextExceptionHelper(Exception* pEx, ContextTransitionFrame* pFrame)
@@ -9411,15 +9333,7 @@ Thread::TryRaiseCrossContextException(Exception **ppExOrig,
*ppThrowable = CLRException::GetThrowableFromException(exception);
_ASSERTE(*ppThrowable != NULL);
-#ifdef FEATURE_CORECLR
- (*pOrBlob) = WrapThrowableInCrossAppDomainMarshaledException(*ppThrowable);
-#if CHECK_APP_DOMAIN_LEAKS
- (*pOrBlob)->SetAppDomainAgile();
-#endif //CHECK_APP_DOMAIN_LEAKS
-#else
AppDomainHelper::MarshalObject(ppThrowable, pOrBlob);
-#endif //FEATURE_CORECLR
-
}
}
EX_CATCH
@@ -9598,6 +9512,25 @@ void DECLSPEC_NORETURN Thread::RaiseCrossContextException(Exception* pExOrig, Co
}
}
+#else // FEATURE_REMOTING
+
+void DECLSPEC_NORETURN Thread::RaiseCrossContextException(Exception* pExOrig, ContextTransitionFrame* pFrame)
+{
+ CONTRACTL
+ {
+ THROWS;
+ WRAPPER(GC_TRIGGERS);
+ }
+ CONTRACTL_END;
+
+ // A NULL pEx means that the exception is the CLRLastThrownObjectException
+ CLRLastThrownObjectException lastThrown;
+ Exception* pException = pExOrig ? pExOrig : &lastThrown;
+ COMPlusThrow(CLRException::GetThrowableFromException(pException));
+}
+
+#endif // FEATURE_REMOTING
+
struct FindADCallbackType {
AppDomain *pSearchDomain;
AppDomain *pPrevDomain;
@@ -9846,7 +9779,7 @@ void Thread::DoExtraWorkForFinalizer()
Thread::CleanupDetachedThreads();
}
- if(ExecutionManager::IsCacheCleanupRequired() && GCHeap::GetGCHeap()->GetCondemnedGeneration()>=1)
+ if(ExecutionManager::IsCacheCleanupRequired() && GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()>=1)
{
ExecutionManager::ClearCaches();
}
@@ -11186,7 +11119,7 @@ void Thread::SetHasPromotedBytes ()
m_fPromoted = TRUE;
- _ASSERTE(GCHeap::IsGCInProgress() && IsGCThread ());
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() && IsGCThread ());
if (!m_fPreemptiveGCDisabled)
{
@@ -11616,7 +11549,7 @@ HRESULT Thread::GetMemStats (COR_GC_THREAD_STATS *pStats)
CONTRACTL_END;
// Get the allocation context which contains this counter in it.
- alloc_context *p = &m_alloc_context;
+ gc_alloc_context *p = &m_alloc_context;
pStats->PerThreadAllocation = p->alloc_bytes + p->alloc_bytes_loh;
if (GetHasPromotedBytes())
pStats->Flags = COR_GC_THREAD_HAS_PROMOTED_BYTES;
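The alloc_context → gc_alloc_context renames here and in threads.h below track the allocation-context type moving into the GC interface. The diff itself only touches alloc_bytes, alloc_bytes_loh, and init(); a sketch of the whole structure's role, with the bump-pointer fields assumed:

    #include <cstdint>

    // Sketch of a per-thread allocation context. alloc_bytes, alloc_bytes_loh
    // and init() appear in this diff; the bump-pointer fields are assumed.
    struct gc_alloc_context
    {
        uint8_t* alloc_ptr;       // next free byte in this thread's chunk
        uint8_t* alloc_limit;     // end of the chunk; overflowing it calls the GC
        int64_t  alloc_bytes;     // lifetime small-object-heap bytes, for stats
        int64_t  alloc_bytes_loh; // lifetime large-object-heap bytes

        void init()
        {
            alloc_ptr = alloc_limit = nullptr;
            alloc_bytes = alloc_bytes_loh = 0;
        }
    };

Keeping a private chunk per thread lets most allocations be a pointer bump with no lock prefixes or cross-CPU cache traffic, which is why FixAllocContext must be called before a thread's context is torn down.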
diff --git a/src/vm/threads.h b/src/vm/threads.h
index ec047f2ddd..144e17c591 100644
--- a/src/vm/threads.h
+++ b/src/vm/threads.h
@@ -142,7 +142,7 @@
#include "regdisp.h"
#include "mscoree.h"
#include "appdomainstack.h"
-#include "gc.h"
+#include "gcheaputilities.h"
#include "gcinfotypes.h"
#include <clrhost.h>
@@ -1072,7 +1072,7 @@ class Thread: public IUnknown
friend DWORD MapWin32FaultToCOMPlusException(EXCEPTION_RECORD *pExceptionRecord);
friend void STDCALL OnHijackWorker(HijackArgs * pArgs);
#ifdef PLATFORM_UNIX
- friend void PALAPI HandleGCSuspensionForInterruptedThread(CONTEXT *interruptedContext);
+ friend void HandleGCSuspensionForInterruptedThread(CONTEXT *interruptedContext);
#endif // PLATFORM_UNIX
#endif // FEATURE_HIJACK
@@ -1739,9 +1739,9 @@ public:
// on MP systems, each thread has its own allocation chunk so we can avoid
// lock prefixes and expensive MP cache snooping stuff
- alloc_context m_alloc_context;
+ gc_alloc_context m_alloc_context;
- inline alloc_context *GetAllocContext() { LIMITED_METHOD_CONTRACT; return &m_alloc_context; }
+ inline gc_alloc_context *GetAllocContext() { LIMITED_METHOD_CONTRACT; return &m_alloc_context; }
// This is the type handle of the first object in the alloc context at the time
// we fire the AllocationTick event. It's only for tooling purpose.
@@ -2796,7 +2796,8 @@ public:
CONTRACTL_END;
return (ObjectFromHandle(m_ExposedObject) != NULL) ;
}
-#ifndef FEATURE_CORECLR
+
+#ifdef FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
void GetSynchronizationContext(OBJECTREF *pSyncContextObj)
{
CONTRACTL
@@ -2814,7 +2815,8 @@ public:
if (ExposedThreadObj != NULL)
*pSyncContextObj = ExposedThreadObj->GetSynchronizationContext();
}
-#endif //!FEATURE_CORECLR
+#endif // FEATURE_SYNCHRONIZATIONCONTEXT_WAIT
+
#ifdef FEATURE_COMPRESSEDSTACK
OBJECTREF GetCompressedStack()
{
@@ -4884,7 +4886,7 @@ private:
private:
// When we create an object, or create an OBJECTREF, or create an Interior Pointer, or enter EE from managed
// code, we will set this flag.
- // Inside GCHeap::StressHeap, we only do GC if this flag is TRUE. Then we reset it to zero.
+ // Inside GCHeapUtilities::StressHeap, we only do GC if this flag is TRUE. Then we reset it to zero.
BOOL m_fStressHeapCount;
public:
void EnableStressHeap()
diff --git a/src/vm/threadsuspend.cpp b/src/vm/threadsuspend.cpp
index c71855f45f..2e6563da1e 100644
--- a/src/vm/threadsuspend.cpp
+++ b/src/vm/threadsuspend.cpp
@@ -754,6 +754,7 @@ static StackWalkAction TAStackCrawlCallBackWorker(CrawlFrame* pCf, StackCrawlCon
}
#undef METHODNAME
+#ifdef FEATURE_CER
// If we're asking about CERs and we don't yet have a definite answer either way then take a closer look at the current method.
if (pData->eType & StackCrawlContext::SCC_CheckWithinCer && !pData->fUnprotectedCode && !pData->fWithinCer)
{
@@ -775,6 +776,7 @@ static StackWalkAction TAStackCrawlCallBackWorker(CrawlFrame* pCf, StackCrawlCon
pData->fUnprotectedCode = true;
}
}
+#endif // FEATURE_CER
// If we weren't asked about EH clauses then we can return now (stop the stack trace if we have a definitive answer on the CER
// question, move to the next frame otherwise).
@@ -1122,6 +1124,7 @@ struct CerStackCrawlContext
bool m_fWithinCer; // The result
};
+#ifdef FEATURE_CER
// Callback used on the stack crawl described above.
StackWalkAction CerStackCrawlCallBack(CrawlFrame *pCf, void *pData)
{
@@ -1164,6 +1167,7 @@ StackWalkAction CerStackCrawlCallBack(CrawlFrame *pCf, void *pData)
// Otherwise everything looks OK so far and we need to investigate the next frame.
return SWA_CONTINUE;
}
+#endif // FEATURE_CER
// Determine whether the method at the given depth in the thread's execution stack is executing within a CER.
BOOL Thread::IsWithinCer(CrawlFrame *pCf)
@@ -1175,6 +1179,9 @@ BOOL Thread::IsWithinCer(CrawlFrame *pCf)
}
CONTRACTL_END;
+#ifndef FEATURE_CER
+ return FALSE;
+#else
// There had better be a method associated with this frame.
MethodDesc *pMD = pCf->GetFunction();
_ASSERTE(pMD != NULL);
@@ -1291,6 +1298,7 @@ BOOL Thread::IsWithinCer(CrawlFrame *pCf)
_ASSERTE(!sContext.m_fFirstFrame);
return sContext.m_fWithinCer;
+#endif // FEATURE_CER
}
#if defined(_TARGET_AMD64_) && defined(FEATURE_HIJACK)
@@ -3276,7 +3284,7 @@ void Thread::RareDisablePreemptiveGC()
__SwitchToThread(0, CALLER_LIMITS_SPINNING);
}
- if (!GCHeap::IsGCHeapInitialized())
+ if (!GCHeapUtilities::IsGCHeapInitialized())
{
goto Exit;
}
@@ -3284,7 +3292,7 @@ void Thread::RareDisablePreemptiveGC()
// Note IsGCInProgress is also true for say Pause (anywhere SuspendEE happens) and GCThread is the
// thread that did the Pause. While in Pause if another thread attempts Rev/Pinvoke it should get inside the following and
// block until resume
- if (((GCHeap::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
+ if (((GCHeapUtilities::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
(m_State & (TS_UserSuspendPending | TS_DebugSuspendPending | TS_StackCrawlNeeded))) &&
(!g_fSuspendOnShutdown || IsFinalizerThread() || IsShutdownSpecialThread()))
{
@@ -3350,7 +3358,7 @@ void Thread::RareDisablePreemptiveGC()
DWORD status = S_OK;
SetThreadStateNC(TSNC_WaitUntilGCFinished);
- status = GCHeap::GetGCHeap()->WaitUntilGCComplete();
+ status = GCHeapUtilities::GetGCHeap()->WaitUntilGCComplete();
ResetThreadStateNC(TSNC_WaitUntilGCFinished);
if (status == (DWORD)COR_E_STACKOVERFLOW)
@@ -3359,7 +3367,7 @@ void Thread::RareDisablePreemptiveGC()
// 1. GC is suspending the process. GC needs to wait.
// 2. GC is proceeding after suspension. The current thread needs to spin.
SetThreadState(TS_BlockGCForSO);
- while (GCHeap::IsGCInProgress() && m_fPreemptiveGCDisabled.Load() == 0)
+ while (GCHeapUtilities::IsGCInProgress() && m_fPreemptiveGCDisabled.Load() == 0)
{
#undef Sleep
// We cannot go to a host for a blocking operation due to lack of stack.
@@ -3376,7 +3384,7 @@ void Thread::RareDisablePreemptiveGC()
break;
}
}
- if (!GCHeap::IsGCInProgress())
+ if (!GCHeapUtilities::IsGCInProgress())
{
if (HasThreadState(TS_StackCrawlNeeded))
{
@@ -3411,7 +3419,7 @@ void Thread::RareDisablePreemptiveGC()
// thread while in this loop. This happens if you use the COM+
// debugger to suspend this thread and then release it.
- } while ((GCHeap::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
+ } while ((GCHeapUtilities::IsGCInProgress() && (this != ThreadSuspend::GetSuspensionThread())) ||
(m_State & (TS_UserSuspendPending | TS_DebugSuspendPending | TS_StackCrawlNeeded)));
}
STRESS_LOG0(LF_SYNC, LL_INFO1000, "RareDisablePreemptiveGC: leaving\n");
@@ -3705,7 +3713,7 @@ void Thread::PerformPreemptiveGC()
if (!GCStressPolicy::IsEnabled() || !GCStress<cfg_transition>::IsEnabled())
return;
- if (!GCHeap::IsGCHeapInitialized())
+ if (!GCHeapUtilities::IsGCHeapInitialized())
return;
if (!m_GCOnTransitionsOK
@@ -3713,8 +3721,8 @@ void Thread::PerformPreemptiveGC()
|| RawGCNoTrigger()
#endif
|| g_fEEShutDown
- || GCHeap::IsGCInProgress(TRUE)
- || GCHeap::GetGCHeap()->GetGcCount() == 0 // Need something that works for isolated heap.
+ || GCHeapUtilities::IsGCInProgress(TRUE)
+ || GCHeapUtilities::GetGCHeap()->GetGcCount() == 0 // Need something that works for isolated heap.
|| ThreadStore::HoldingThreadStore())
return;
@@ -3738,7 +3746,7 @@ void Thread::PerformPreemptiveGC()
{
GCX_COOP();
m_bGCStressing = TRUE;
- GCHeap::GetGCHeap()->StressHeap();
+ GCHeapUtilities::GetGCHeap()->StressHeap();
m_bGCStressing = FALSE;
}
m_GCOnTransitionsOK = TRUE;
@@ -4846,7 +4854,7 @@ HRESULT ThreadSuspend::SuspendRuntime(ThreadSuspend::SUSPEND_REASON reason)
// Caller is expected to be holding the ThreadStore lock. Also, caller must
// have set GcInProgress before coming here, or things will break;
_ASSERTE(ThreadStore::HoldingThreadStore() || IsAtProcessExit());
- _ASSERTE(GCHeap::IsGCInProgress() );
+ _ASSERTE(GCHeapUtilities::IsGCInProgress() );
STRESS_LOG1(LF_SYNC, LL_INFO1000, "Thread::SuspendRuntime(reason=0x%x)\n", reason);
@@ -5547,7 +5555,7 @@ void ThreadSuspend::ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded)
// reset GcInProgress, or threads will continue to suspend themselves and won't
// be resumed until the next GC.
_ASSERTE(IsGCSpecialThread() || ThreadStore::HoldingThreadStore());
- _ASSERTE(!GCHeap::IsGCInProgress() );
+ _ASSERTE(!GCHeapUtilities::IsGCInProgress() );
STRESS_LOG2(LF_SYNC, LL_INFO1000, "Thread::ResumeRuntime(finishedGC=%d, SuspendSucceeded=%d) - Start\n", bFinishedGC, SuspendSucceded);
@@ -5564,7 +5572,7 @@ void ThreadSuspend::ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded)
{
// If the suspension was for a GC, tell the host which generation was collected.
DWORD Generation = (bFinishedGC
- ? GCHeap::GetGCHeap()->GetCondemnedGeneration()
+ ? GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()
: ~0U);
pGCThreadControl->SuspensionEnding(Generation);
@@ -5574,7 +5582,7 @@ void ThreadSuspend::ResumeRuntime(BOOL bFinishedGC, BOOL SuspendSucceded)
{
// If the suspension was for a GC, tell the host which generation was collected.
DWORD Generation = (bFinishedGC
- ? GCHeap::GetGCHeap()->GetCondemnedGeneration()
+ ? GCHeapUtilities::GetGCHeap()->GetCondemnedGeneration()
: ~0U);
BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
@@ -7275,18 +7283,8 @@ ReturnKind GetReturnKindFromMethodTable(Thread *pThread, EECodeInfo *codeInfo)
ReturnKind GetReturnKind(Thread *pThread, EECodeInfo *codeInfo)
{
- ReturnKind returnKind = RT_Illegal;
-
-#ifdef _TARGET_X86_
- // X86 GCInfo updates yet to be implemented.
-#else
GCInfoToken gcInfoToken = codeInfo->GetGCInfoToken();
- if (gcInfoToken.IsReturnKindAvailable())
- {
- GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_RETURN_KIND);
- returnKind = gcInfoDecoder.GetReturnKind();
- }
-#endif // _TARGET_X86_
+ ReturnKind returnKind = codeInfo->GetCodeManager()->GetReturnKind(gcInfoToken);
if (!IsValidReturnKind(returnKind))
{
@@ -7312,7 +7310,7 @@ VOID * GetHijackAddr(Thread *pThread, EECodeInfo *codeInfo)
#ifdef _TARGET_X86_
if (returnKind == RT_Float)
{
- return reinterpret_cast<VOID *>(OnHijackTripThread);
+ return reinterpret_cast<VOID *>(OnHijackFPTripThread);
}
#endif // _TARGET_X86_
@@ -7898,7 +7896,7 @@ void ThreadSuspend::RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded)
// Revert to being a normal thread
//
ClrFlsClearThreadType (ThreadType_DynamicSuspendEE);
- GCHeap::GetGCHeap()->SetGCInProgress(FALSE);
+ GCHeapUtilities::GetGCHeap()->SetGCInProgress(FALSE);
//
// Allow threads to enter COOP mode (though we still need to wake the ones
@@ -7906,7 +7904,7 @@ void ThreadSuspend::RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded)
//
// Note: this is the last barrier that keeps managed threads
// from entering cooperative mode. If the sequence changes,
- // you may have to change routine GCHeap::SafeToRestartManagedThreads
+ // you may have to change routine GCHeapUtilities::SafeToRestartManagedThreads
// as well.
//
ThreadStore::TrapReturningThreads(FALSE);
@@ -7915,7 +7913,7 @@ void ThreadSuspend::RestartEE(BOOL bFinishedGC, BOOL SuspendSucceded)
//
// Any threads that are waiting in WaitUntilGCComplete will continue now.
//
- GCHeap::GetGCHeap()->GetWaitForGCEvent()->Set();
+ GCHeapUtilities::GetGCHeap()->GetWaitForGCEvent()->Set();
_ASSERTE(IsGCSpecialThread() || ThreadStore::HoldingThreadStore());
ResumeRuntime(bFinishedGC, SuspendSucceded);
@@ -7964,7 +7962,7 @@ void ThreadSuspend::SuspendEE(SUSPEND_REASON reason)
ETW::GCLog::ETW_GC_INFO Info;
Info.SuspendEE.Reason = reason;
Info.SuspendEE.GcCount = (((reason == SUSPEND_FOR_GC) || (reason == SUSPEND_FOR_GC_PREP)) ?
- (ULONG)GCHeap::GetGCHeap()->GetGcCount() : (ULONG)-1);
+ (ULONG)GCHeapUtilities::GetGCHeap()->GetGcCount() : (ULONG)-1);
FireEtwGCSuspendEEBegin_V1(Info.SuspendEE.Reason, Info.SuspendEE.GcCount, GetClrInstanceId());
@@ -8041,7 +8039,7 @@ retry_for_debugger:
//
// First, we reset the event that we're about to tell other threads to wait for.
//
- GCHeap::GetGCHeap()->GetWaitForGCEvent()->Reset();
+ GCHeapUtilities::GetGCHeap()->GetWaitForGCEvent()->Reset();
//
// Remember that we're the one doing the GC. Actually, maybe we're not doing a GC -
@@ -8066,7 +8064,7 @@ retry_for_debugger:
// It seems like much of the above is redundant. We should investigate reducing the number
// of mechanisms we use to indicate that a suspension is in progress.
//
- GCHeap::GetGCHeap()->SetGCInProgress(TRUE);
+ GCHeapUtilities::GetGCHeap()->SetGCInProgress(TRUE);
//
// Gratuitous memory barrier. (may be needed - but I'm not sure why.)
@@ -8214,7 +8212,7 @@ retry_for_debugger:
// This function is called by PAL to check if the specified instruction pointer
// is in a function where we can safely inject activation.
-BOOL PALAPI CheckActivationSafePoint(SIZE_T ip, BOOL checkingCurrentThread)
+BOOL CheckActivationSafePoint(SIZE_T ip, BOOL checkingCurrentThread)
{
Thread *pThread = GetThread();
// It is safe to call the ExecutionManager::IsManagedCode only if we are making the check for
@@ -8241,7 +8239,7 @@ BOOL PALAPI CheckActivationSafePoint(SIZE_T ip, BOOL checkingCurrentThread)
// address to take the thread to the appropriate stub (based on the return
// type of the method) which will then handle preparing the thread for GC.
//
-void PALAPI HandleGCSuspensionForInterruptedThread(CONTEXT *interruptedContext)
+void HandleGCSuspensionForInterruptedThread(CONTEXT *interruptedContext)
{
Thread *pThread = GetThread();
@@ -8357,7 +8355,7 @@ void ThreadSuspend::Initialize()
BOOL Debug_IsLockedViaThreadSuspension()
{
LIMITED_METHOD_CONTRACT;
- return GCHeap::IsGCInProgress() &&
+ return GCHeapUtilities::IsGCInProgress() &&
(dbgOnly_IsSpecialEEThread() ||
IsGCSpecialThread() ||
GetThread() == ThreadSuspend::GetSuspensionThread());
@@ -8485,7 +8483,7 @@ void SuspendStatistics::EndSuspend(BOOL bForGC)
// details on suspends...
if (!bForGC)
cntNonGCSuspends++;
- if (GCHeap::GetGCHeap()->IsConcurrentGCInProgress())
+ if (GCHeapUtilities::GetGCHeap()->IsConcurrentGCInProgress())
{
cntSuspendsInBGC++;
if (!bForGC)
diff --git a/src/vm/typeparse.cpp b/src/vm/typeparse.cpp
index 356cb78423..28521f1839 100644
--- a/src/vm/typeparse.cpp
+++ b/src/vm/typeparse.cpp
@@ -364,7 +364,7 @@ HRESULT __stdcall TypeName::GetAssemblyName(BSTR* pszAssemblyName)
return hr;
}
-#if !defined(FEATURE_CORECLR) && !defined(CROSSGEN_COMPILE)
+#if !defined(CROSSGEN_COMPILE)
SAFEHANDLE TypeName::GetSafeHandle()
{
CONTRACTL
@@ -588,7 +588,7 @@ void QCALLTYPE TypeName::QGetAssemblyName(TypeName * pTypeName, QCall::StringHan
END_QCALL;
}
-#endif //!FEATURE_CORECLR && !CROSSGEN_COMPILE
+#endif // !CROSSGEN_COMPILE
//
// TypeName::TypeNameParser
@@ -1926,7 +1926,7 @@ DomainAssembly * LoadDomainAssembly(
{
Exception *ex = GET_EXCEPTION();
- // Let non-File-not-found execeptions propagate
+ // Let non-File-not-found exceptions propagate
if (EEFileLoadException::GetFileLoadKind(ex->GetHR()) != kFileNotFoundException)
EX_RETHROW;
}
diff --git a/src/vm/typeparse.h b/src/vm/typeparse.h
index bf30730d46..00a3349ce2 100644
--- a/src/vm/typeparse.h
+++ b/src/vm/typeparse.h
@@ -311,14 +311,14 @@ public:
virtual ~TypeName();
public:
-#ifndef FEATURE_CORECLR
+#ifndef CROSSGEN_COMPILE
static void QCALLTYPE QCreateTypeNameParser (LPCWSTR wszTypeName, QCall::ObjectHandleOnStack pNames, BOOL throwOnError);
static void QCALLTYPE QReleaseTypeNameParser(TypeName * pTypeName);
static void QCALLTYPE QGetNames (TypeName * pTypeName, QCall::ObjectHandleOnStack pNames);
static void QCALLTYPE QGetTypeArguments (TypeName * pTypeName, QCall::ObjectHandleOnStack pTypeArguments);
static void QCALLTYPE QGetModifiers (TypeName * pTypeName, QCall::ObjectHandleOnStack pModifiers);
static void QCALLTYPE QGetAssemblyName (TypeName * pTypeName, QCall::StringHandleOnStack pAssemblyName);
-#endif //!FEATURE_CORECLR
+#endif // !CROSSGEN_COMPILE
//-------------------------------------------------------------------------------------------
// Retrieves a type from an assembly. It requires the caller to know which assembly
@@ -451,10 +451,7 @@ private:
return GetTypeHaveAssemblyHelper(pAssembly, bThrowIfNotFound, bIgnoreCase, pKeepAlive, TRUE);
}
TypeHandle GetTypeHaveAssemblyHelper(Assembly* pAssembly, BOOL bThrowIfNotFound, BOOL bIgnoreCase, OBJECTREF *pKeepAlive, BOOL bRecurse);
-
-#ifndef FEATURE_CORECLR
SAFEHANDLE GetSafeHandle();
-#endif //!FEATURE_CORECLR
private:
BOOL m_bIsGenericArgument;
diff --git a/src/vm/util.cpp b/src/vm/util.cpp
index 2cb3460122..2cf6f7f31c 100644
--- a/src/vm/util.cpp
+++ b/src/vm/util.cpp
@@ -1950,17 +1950,18 @@ size_t GetLogicalProcessorCacheSizeFromOS()
// Crack the information. Iterate through all the SLPI array entries for all processors in system.
// Will return the greatest of all the processor cache sizes or zero
-
- size_t last_cache_size = 0;
-
- for (DWORD i=0; i < nEntries; i++)
{
- if (pslpi[i].Relationship == RelationCache)
+ size_t last_cache_size = 0;
+
+ for (DWORD i=0; i < nEntries; i++)
{
- last_cache_size = max(last_cache_size, pslpi[i].Cache.Size);
- }
- }
- cache_size = last_cache_size;
+ if (pslpi[i].Relationship == RelationCache)
+ {
+ last_cache_size = max(last_cache_size, pslpi[i].Cache.Size);
+ }
+ }
+ cache_size = last_cache_size;
+ }
Exit:
if(pslpi)
@@ -1991,6 +1992,9 @@ DWORD GetLogicalCpuCountFromOS()
DWORD nEntries = 0;
+ DWORD prevcount = 0;
+ DWORD count = 1;
+
// Try to use GetLogicalProcessorInformation API and get a valid pointer to the SLPI array if successful. Returns NULL
// if API not present or on failure.
SYSTEM_LOGICAL_PROCESSOR_INFORMATION *pslpi = IsGLPISupported(&nEntries) ;
@@ -2001,9 +2005,6 @@ DWORD GetLogicalCpuCountFromOS()
goto lDone;
}
- DWORD prevcount = 0;
- DWORD count = 1;
-
for (DWORD j = 0; j < nEntries; j++)
{
if (pslpi[j].Relationship == RelationProcessorCore)
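Both util.cpp hunks above share one motivation: these functions exit through goto (the Exit: and lDone: labels), and C++ rejects a jump that bypasses the initialization of a local still in scope at the label. Wrapping the locals in a block, or declaring them before the first goto, satisfies the rule:

    // A goto may not bypass the initialization of a variable still in scope
    // at the label (MSVC C2362; gcc/clang report a similar error).
    int Lookup(bool fail)
    {
        int result = 0;
        if (fail)
            goto Exit;
        {
            int count = 1;   // braces end the scope of 'count' before Exit:,
            result = count;  // so the jump above no longer skips its init
        }
    Exit:
        return result;
    }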
@@ -2069,16 +2070,9 @@ lDone:
#define CACHE_PARTITION_BITS 0x003FF000 // number of cache Physical Partitions is returned in EBX[21:12] (10 bits) using cpuid function 4
#define CACHE_LINESIZE_BITS 0x00000FFF // Linesize returned in EBX[11:0] (12 bits) using cpuid function 4
-#if defined(_TARGET_X86_)
- // these are defined in cgenx86.cpp
- extern DWORD getcpuid(DWORD arg1, unsigned char result[16]);
- extern DWORD getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16]);
-#elif defined(_TARGET_AMD64_)
- // these are defined in src\VM\AMD64\asmhelpers.asm
- extern "C" DWORD __stdcall getcpuid(DWORD arg1, unsigned char result[16]);
- extern "C" DWORD __stdcall getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16]);
-#endif
-
+// these are defined in src\VM\AMD64\asmhelpers.asm / cgenx86.cpp
+extern "C" DWORD __stdcall getcpuid(DWORD arg1, unsigned char result[16]);
+extern "C" DWORD __stdcall getextcpuid(DWORD arg1, DWORD arg2, unsigned char result[16]);
// The following function uses a deterministic mechanism for enumerating/calculating the details of the cache hierarchy at runtime
// by using deterministic cache parameter leafs on Prescott and higher processors.
@@ -2557,7 +2551,7 @@ extern BOOL EEHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem);
extern void ShutdownRuntimeWithoutExiting(int exitCode);
extern BOOL IsRuntimeStarted(DWORD *pdwStartupFlags);
-void * GetCLRFunction(LPCSTR FunctionName)
+void * __stdcall GetCLRFunction(LPCSTR FunctionName)
{
void* func = NULL;
diff --git a/src/vm/vars.cpp b/src/vm/vars.cpp
index b737e66cd5..626ca3c9d3 100644
--- a/src/vm/vars.cpp
+++ b/src/vm/vars.cpp
@@ -69,6 +69,9 @@ GPTR_IMPL(MethodTable, g_pStringClass);
GPTR_IMPL(MethodTable, g_pArrayClass);
GPTR_IMPL(MethodTable, g_pSZArrayHelperClass);
GPTR_IMPL(MethodTable, g_pNullableClass);
+#ifdef FEATURE_SPAN_OF_T
+GPTR_IMPL(MethodTable, g_pByReferenceClass);
+#endif
GPTR_IMPL(MethodTable, g_pExceptionClass);
GPTR_IMPL(MethodTable, g_pThreadAbortExceptionClass);
GPTR_IMPL(MethodTable, g_pOutOfMemoryExceptionClass);
@@ -79,8 +82,12 @@ GPTR_IMPL(MethodTable, g_pMulticastDelegateClass);
GPTR_IMPL(MethodTable, g_pValueTypeClass);
GPTR_IMPL(MethodTable, g_pEnumClass);
GPTR_IMPL(MethodTable, g_pThreadClass);
+#ifdef FEATURE_CER
GPTR_IMPL(MethodTable, g_pCriticalFinalizerObjectClass);
+#endif
+#ifndef FEATURE_CORECLR
GPTR_IMPL(MethodTable, g_pAsyncFileStream_AsyncResultClass);
+#endif // !FEATURE_CORECLR
GPTR_IMPL(MethodTable, g_pFreeObjectMethodTable);
GPTR_IMPL(MethodTable, g_pOverlappedDataClass);
@@ -98,7 +105,9 @@ GPTR_IMPL(MethodTable, g_pICastableInterface);
#endif // FEATURE_ICASTABLE
+#ifdef FEATURE_CER
GPTR_IMPL(MethodDesc, g_pPrepareConstrainedRegionsMethod);
+#endif
GPTR_IMPL(MethodDesc, g_pExecuteBackoutCodeHelperMethod);
GPTR_IMPL(MethodDesc, g_pObjectCtorMD);
diff --git a/src/vm/vars.hpp b/src/vm/vars.hpp
index d197e0559d..62d6656eaf 100644
--- a/src/vm/vars.hpp
+++ b/src/vm/vars.hpp
@@ -81,7 +81,7 @@ typedef unsigned short wchar_t;
class ClassLoader;
class LoaderHeap;
-class GCHeap;
+class IGCHeap;
class Object;
class StringObject;
class TransparentProxyObject;
@@ -402,6 +402,9 @@ GPTR_DECL(MethodTable, g_pStringClass);
GPTR_DECL(MethodTable, g_pArrayClass);
GPTR_DECL(MethodTable, g_pSZArrayHelperClass);
GPTR_DECL(MethodTable, g_pNullableClass);
+#ifdef FEATURE_SPAN_OF_T
+GPTR_DECL(MethodTable, g_pByReferenceClass);
+#endif
GPTR_DECL(MethodTable, g_pExceptionClass);
GPTR_DECL(MethodTable, g_pThreadAbortExceptionClass);
GPTR_DECL(MethodTable, g_pOutOfMemoryExceptionClass);
@@ -414,8 +417,12 @@ GPTR_DECL(MethodTable, g_pFreeObjectMethodTable);
GPTR_DECL(MethodTable, g_pValueTypeClass);
GPTR_DECL(MethodTable, g_pEnumClass);
GPTR_DECL(MethodTable, g_pThreadClass);
+#ifdef FEATURE_CER
GPTR_DECL(MethodTable, g_pCriticalFinalizerObjectClass);
+#endif
+#ifndef FEATURE_CORECLR
GPTR_DECL(MethodTable, g_pAsyncFileStream_AsyncResultClass);
+#endif // !FEATURE_CORECLR
GPTR_DECL(MethodTable, g_pOverlappedDataClass);
GPTR_DECL(MethodTable, g_TypedReferenceMT);
@@ -431,7 +438,9 @@ GPTR_DECL(MethodTable, g_pBaseRuntimeClass);
GPTR_DECL(MethodTable, g_pICastableInterface);
#endif // FEATURE_ICASTABLE
+#ifdef FEATURE_CER
GPTR_DECL(MethodDesc, g_pPrepareConstrainedRegionsMethod);
+#endif
GPTR_DECL(MethodDesc, g_pExecuteBackoutCodeHelperMethod);
GPTR_DECL(MethodDesc, g_pObjectCtorMD);
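The mirrored vars.cpp/vars.hpp edits follow the GPTR_IMPL/GPTR_DECL discipline: each global pointer is declared once in the header and defined once in the source file, so a feature guard added on one side must be added on the other or the build fails with an undefined or duplicate symbol. Conceptually, and greatly simplified (the real macros also produce DAC-aware typed pointers):

    // Greatly simplified model of the DECL/IMPL pairing; the real CoreCLR
    // macros additionally wrap the pointer for DAC (out-of-process) access.
    #define GPTR_DECL(type, name) extern type* name
    #define GPTR_IMPL(type, name) type* name = nullptr

    // Header: GPTR_DECL(MethodTable, g_pByReferenceClass);
    // Source: GPTR_IMPL(MethodTable, g_pByReferenceClass);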
diff --git a/src/vm/virtualcallstub.cpp b/src/vm/virtualcallstub.cpp
index 512b4f2b36..5fc66f6d6a 100644
--- a/src/vm/virtualcallstub.cpp
+++ b/src/vm/virtualcallstub.cpp
@@ -1107,7 +1107,7 @@ BOOL VirtualCallStubManager::TraceManager(Thread *thread,
#ifdef FEATURE_PREJIT
// This is the case for the lazy slot fixup
- if (GetIP(pContext) == GFN_TADDR(StubDispatchFixupPatchLabel)) {
+ if (GetIP(pContext) == GetEEFuncEntryPoint(StubDispatchFixupPatchLabel)) {
*pRetAddr = (BYTE *)StubManagerHelpers::GetReturnAddress(pContext);
diff --git a/src/vm/vm.settings b/src/vm/vm.settings
index b4799d1b37..83d7d9f72e 100644
--- a/src/vm/vm.settings
+++ b/src/vm/vm.settings
@@ -1,5 +1,12 @@
<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup>
+ <!-- Work around problems with loading System.Private.CoreLib.dll, -->
+ <!-- caused by inconsistent setting of UseLegacyCompiler and FeatureSpanOfT -->
+ <!-- between System.Private.CoreLib.dll and the runtime. -->
+ <UseLegacyCompiler>true</UseLegacyCompiler>
+ </PropertyGroup>
+
<!--Import the settings-->
<Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\clr.props" />
<Import Project="$(_NTDRIVE)$(_NTROOT)\ndp\clr\src\debug\SetDebugTargetLocal.props" />
diff --git a/src/vm/win32threadpool.cpp b/src/vm/win32threadpool.cpp
index 1121417492..e8a05c383f 100644
--- a/src/vm/win32threadpool.cpp
+++ b/src/vm/win32threadpool.cpp
@@ -532,7 +532,9 @@ BOOL ThreadpoolMgr::SetMaxThreadsHelper(DWORD MaxWorkerThreads,
CrstHolder csh(&WorkerCriticalSection);
if (MaxWorkerThreads >= (DWORD)MinLimitTotalWorkerThreads &&
- MaxIOCompletionThreads >= (DWORD)MinLimitTotalCPThreads)
+ MaxIOCompletionThreads >= (DWORD)MinLimitTotalCPThreads &&
+ MaxWorkerThreads != 0 &&
+ MaxIOCompletionThreads != 0)
{
BEGIN_SO_INTOLERANT_CODE(GetThread());
@@ -2367,11 +2369,11 @@ Work:
counts = oldCounts;
}
- if (GCHeap::IsGCInProgress(TRUE))
+ if (GCHeapUtilities::IsGCInProgress(TRUE))
{
// GC is imminent, so wait until GC is complete before executing next request.
// this reduces in-flight objects allocated right before GC, easing the GC's work
- GCHeap::WaitForGCCompletion(TRUE);
+ GCHeapUtilities::WaitForGCCompletion(TRUE);
}
{
@@ -3677,6 +3679,8 @@ DWORD __stdcall ThreadpoolMgr::CompletionPortThreadStart(LPVOID lpArgs)
BOOL fThreadInit = FALSE;
Thread *pThread = NULL;
+ DWORD cpThreadWait = 0;
+
if (g_fEEStarted) {
pThread = SetupThreadNoThrow();
if (pThread == NULL) {
@@ -3711,7 +3715,7 @@ DWORD __stdcall ThreadpoolMgr::CompletionPortThreadStart(LPVOID lpArgs)
ThreadCounter::Counts oldCounts;
ThreadCounter::Counts newCounts;
- DWORD cpThreadWait = CP_THREAD_WAIT;
+ cpThreadWait = CP_THREAD_WAIT;
for (;; )
{
Top:
@@ -3986,7 +3990,7 @@ Top:
if (key != 0)
{
- if (GCHeap::IsGCInProgress(TRUE))
+ if (GCHeapUtilities::IsGCInProgress(TRUE))
{
//Indicate that this thread is free, and waiting on GC, not doing any user work.
//This helps in threads not getting injected when some threads have woken up from the
@@ -4003,7 +4007,7 @@ Top:
// GC is imminent, so wait until GC is complete before executing next request.
// this reduces in-flight objects allocated right before GC, easing the GC's work
- GCHeap::WaitForGCCompletion(TRUE);
+ GCHeapUtilities::WaitForGCCompletion(TRUE);
while (true)
{
@@ -4217,7 +4221,7 @@ BOOL ThreadpoolMgr::ShouldGrowCompletionPortThreadpool(ThreadCounter::Counts cou
if (counts.NumWorking >= counts.NumActive
&& NumCPInfrastructureThreads == 0
- && (counts.NumActive == 0 || !GCHeap::IsGCInProgress(TRUE))
+ && (counts.NumActive == 0 || !GCHeapUtilities::IsGCInProgress(TRUE))
)
{
// adjust limit if needed
@@ -4618,7 +4622,7 @@ DWORD __stdcall ThreadpoolMgr::GateThreadStart(LPVOID lpArgs)
EX_END_CATCH(SwallowAllExceptions);
}
- if (!GCHeap::IsGCInProgress(FALSE) )
+ if (!GCHeapUtilities::IsGCInProgress(FALSE) )
{
if (IgnoreNextSample)
{
@@ -4660,7 +4664,7 @@ DWORD __stdcall ThreadpoolMgr::GateThreadStart(LPVOID lpArgs)
oldCounts.NumActive < MaxLimitTotalCPThreads &&
!g_fCompletionPortDrainNeeded &&
NumCPInfrastructureThreads == 0 && // infrastructure threads count as "to be free as needed"
- !GCHeap::IsGCInProgress(TRUE))
+ !GCHeapUtilities::IsGCInProgress(TRUE))
{
BOOL status;
diff --git a/src/vm/win32threadpool.h b/src/vm/win32threadpool.h
index f712ef983d..6b4f1dfe07 100644
--- a/src/vm/win32threadpool.h
+++ b/src/vm/win32threadpool.h
@@ -505,22 +505,22 @@ public:
static BOOL UnregisterWaitEx(HANDLE hWaitObject,HANDLE CompletionEvent);
static void WaitHandleCleanup(HANDLE hWaitObject);
- static BOOL BindIoCompletionCallback(HANDLE FileHandle,
+ static BOOL WINAPI BindIoCompletionCallback(HANDLE FileHandle,
LPOVERLAPPED_COMPLETION_ROUTINE Function,
ULONG Flags,
DWORD& errorCode);
- static void WaitIOCompletionCallback(DWORD dwErrorCode,
+ static void WINAPI WaitIOCompletionCallback(DWORD dwErrorCode,
DWORD numBytesTransferred,
LPOVERLAPPED lpOverlapped);
- static VOID CallbackForInitiateDrainageOfCompletionPortQueue(
+ static VOID WINAPI CallbackForInitiateDrainageOfCompletionPortQueue(
DWORD dwErrorCode,
DWORD dwNumberOfBytesTransfered,
LPOVERLAPPED lpOverlapped
);
- static VOID CallbackForContinueDrainageOfCompletionPortQueue(
+ static VOID WINAPI CallbackForContinueDrainageOfCompletionPortQueue(
DWORD dwErrorCode,
DWORD dwNumberOfBytesTransfered,
LPOVERLAPPED lpOverlapped
diff --git a/src/vm/winrtredirector.h b/src/vm/winrtredirector.h
index 2561252ba9..f725ca8eb3 100644
--- a/src/vm/winrtredirector.h
+++ b/src/vm/winrtredirector.h
@@ -137,7 +137,7 @@ class WinRTDelegateRedirector
public:
static MethodTable *GetWinRTTypeForRedirectedDelegateIndex(WinMDAdapter::RedirectedTypeIndex index);
- static bool WinRTDelegateRedirector::ResolveRedirectedDelegate(MethodTable *pMT, WinMDAdapter::RedirectedTypeIndex *pIndex);
+ static bool ResolveRedirectedDelegate(MethodTable *pMT, WinMDAdapter::RedirectedTypeIndex *pIndex);
};
#endif // WINRT_DELEGATE_REDIRECTOR_H
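The winrtredirector.h fix drops a redundant class qualification from an in-class declaration. The standard forbids the form; MSVC historically tolerated it, but clang and gcc reject it, so it blocks cross-platform builds:

    class Redirector // hypothetical stand-in for WinRTDelegateRedirector
    {
        // ill-formed; clang/gcc: "extra qualification 'Redirector::' on member"
        //   static bool Redirector::Resolve(int index);

        static bool Resolve(int index); // correct in-class declaration
    };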
diff --git a/src/vm/wks/wks.targets b/src/vm/wks/wks.targets
index 04562365f6..0df66fe558 100644
--- a/src/vm/wks/wks.targets
+++ b/src/vm/wks/wks.targets
@@ -107,6 +107,7 @@
<CppCompile Include="$(VmSourcesDir)\gcenv.os.cpp" />
<CppCompile Include="$(VmSourcesDir)\gchelpers.cpp" />
<CppCompile Include="$(VmSourcesDir)\gchost.cpp" />
+ <CppCompile Include="$(VmSourcesDir)\gcheaputilities.cpp" />
<CppCompile Include="$(VmSourcesDir)\genericdict.cpp" />
<CppCompile Include="$(VmSourcesDir)\generics.cpp" />
<CppCompile Include="$(VmSourcesDir)\genmeth.cpp" />